code (string, lengths 13 to 6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, lengths 1 to 5)
---|---|---|---|
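# Image router: CRUD endpoints that delegate to image_service, each handler
# receiving a SQLAlchemy Session through the ApiSession dependency.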
from sqlalchemy.orm import Session
from fastapi import APIRouter, Depends, File
from typing import List

from ..models.database import ApiSession
from ..schemas.images_schema import ImageReturn

from . import image_service

router = APIRouter()


@router.get("/", response_model=List[ImageReturn])
def get_all_images(db: Session = Depends(ApiSession)):
    return image_service.get_all_images(db)


@router.get("/{image_id}", response_model=ImageReturn)
def get_image_by_id(image_id: int, db: Session = Depends(ApiSession)):
    return image_service.get_image_by_id(image_id, db)


@router.post("/name/{image_name}", response_model=List[ImageReturn])
def create_images(image_name: str, files: List[bytes] = File(...), db: Session = Depends(ApiSession)):
    return image_service.create_images(image_name, files, db)


@router.delete("/{image_id}", response_model=None)
def delete_image_by_id(image_id: int, db: Session = Depends(ApiSession)):
    return image_service.delete_image_by_id(image_id, db)


@router.delete("/", response_model=None)
def delete_images_by_ids(image_ids: List[int], db: Session = Depends(ApiSession)):
    return image_service.delete_images_by_ids(image_ids, db)
| normal |
{
"blob_id": "874ca60749dba9ca8c8ebee2eecb1b80da50f11f",
"index": 3782,
"step-1": "<mask token>\n\n\[email protected]('/', response_model=List[ImageReturn])\ndef get_all_images(db: Session=Depends(ApiSession)):\n return image_service.get_all_images(db)\n\n\[email protected]('/{image_id}', response_model=ImageReturn)\ndef get_image_by_id(image_id: int, db: Session=Depends(ApiSession)):\n return image_service.get_image_by_id(image_id, db)\n\n\[email protected]('/name/{image_name}', response_model=List[ImageReturn])\ndef create_images(image_name: str, files: List[bytes]=File(...), db:\n Session=Depends(ApiSession)):\n return image_service.create_images(image_name, files, db)\n\n\[email protected]('/{image_id}', response_model=None)\ndef delete_image_by_id(image_id: int, db: Session=Depends(ApiSession)):\n return image_service.delete_image_by_id(image_id, db)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/', response_model=List[ImageReturn])\ndef get_all_images(db: Session=Depends(ApiSession)):\n return image_service.get_all_images(db)\n\n\[email protected]('/{image_id}', response_model=ImageReturn)\ndef get_image_by_id(image_id: int, db: Session=Depends(ApiSession)):\n return image_service.get_image_by_id(image_id, db)\n\n\[email protected]('/name/{image_name}', response_model=List[ImageReturn])\ndef create_images(image_name: str, files: List[bytes]=File(...), db:\n Session=Depends(ApiSession)):\n return image_service.create_images(image_name, files, db)\n\n\[email protected]('/{image_id}', response_model=None)\ndef delete_image_by_id(image_id: int, db: Session=Depends(ApiSession)):\n return image_service.delete_image_by_id(image_id, db)\n\n\[email protected]('/', response_model=None)\ndef delete_images_by_ids(image_ids: List[int], db: Session=Depends(ApiSession)\n ):\n return image_service.delete_images_by_ids(image_ids, db)\n",
"step-3": "<mask token>\nrouter = APIRouter()\n\n\[email protected]('/', response_model=List[ImageReturn])\ndef get_all_images(db: Session=Depends(ApiSession)):\n return image_service.get_all_images(db)\n\n\[email protected]('/{image_id}', response_model=ImageReturn)\ndef get_image_by_id(image_id: int, db: Session=Depends(ApiSession)):\n return image_service.get_image_by_id(image_id, db)\n\n\[email protected]('/name/{image_name}', response_model=List[ImageReturn])\ndef create_images(image_name: str, files: List[bytes]=File(...), db:\n Session=Depends(ApiSession)):\n return image_service.create_images(image_name, files, db)\n\n\[email protected]('/{image_id}', response_model=None)\ndef delete_image_by_id(image_id: int, db: Session=Depends(ApiSession)):\n return image_service.delete_image_by_id(image_id, db)\n\n\[email protected]('/', response_model=None)\ndef delete_images_by_ids(image_ids: List[int], db: Session=Depends(ApiSession)\n ):\n return image_service.delete_images_by_ids(image_ids, db)\n",
"step-4": "from sqlalchemy.orm import Session\nfrom fastapi import APIRouter, Depends, File\nfrom typing import List\nfrom ..models.database import ApiSession\nfrom ..schemas.images_schema import ImageReturn\nfrom . import image_service\nrouter = APIRouter()\n\n\[email protected]('/', response_model=List[ImageReturn])\ndef get_all_images(db: Session=Depends(ApiSession)):\n return image_service.get_all_images(db)\n\n\[email protected]('/{image_id}', response_model=ImageReturn)\ndef get_image_by_id(image_id: int, db: Session=Depends(ApiSession)):\n return image_service.get_image_by_id(image_id, db)\n\n\[email protected]('/name/{image_name}', response_model=List[ImageReturn])\ndef create_images(image_name: str, files: List[bytes]=File(...), db:\n Session=Depends(ApiSession)):\n return image_service.create_images(image_name, files, db)\n\n\[email protected]('/{image_id}', response_model=None)\ndef delete_image_by_id(image_id: int, db: Session=Depends(ApiSession)):\n return image_service.delete_image_by_id(image_id, db)\n\n\[email protected]('/', response_model=None)\ndef delete_images_by_ids(image_ids: List[int], db: Session=Depends(ApiSession)\n ):\n return image_service.delete_images_by_ids(image_ids, db)\n",
"step-5": "from sqlalchemy.orm import Session\nfrom fastapi import APIRouter, Depends, File\nfrom typing import List\n\nfrom ..models.database import ApiSession\nfrom ..schemas.images_schema import ImageReturn\n\nfrom . import image_service\n\nrouter = APIRouter()\n\[email protected](\"/\", response_model=List[ImageReturn])\ndef get_all_images(db: Session = Depends(ApiSession)):\n return image_service.get_all_images(db)\n\[email protected](\"/{image_id}\", response_model=ImageReturn)\ndef get_image_by_id(image_id: int, db: Session = Depends(ApiSession)):\n return image_service.get_image_by_id(image_id, db)\n\[email protected](\"/name/{image_name}\", response_model=List[ImageReturn])\ndef create_images(image_name: str, files: List[bytes] = File(...), db: Session = Depends(ApiSession)):\n return image_service.create_images(image_name, files, db)\n\[email protected](\"/{image_id}\", response_model=None)\ndef delete_image_by_id(image_id: int, db: Session = Depends(ApiSession)):\n return image_service.delete_image_by_id(image_id, db)\n\[email protected](\"/\", response_model=None)\ndef delete_images_by_ids(image_ids: List[int], db: Session = Depends(ApiSession)):\n return image_service.delete_images_by_ids(image_ids, db)",
"step-ids": [
4,
5,
6,
7,
8
]
}
| [4, 5, 6, 7, 8] |
<|reserved_special_token_0|>
def make_rules(folder):
rules_dictionary = {}
try:
path = os.path.join(os.getcwd(), 'rules', 'data', folder)
files = os.listdir(path)
except:
path = os.path.join(os.getcwd(), 'data', folder)
files = os.listdir(path)
short_files_rule = re.compile('.txt')
for file in files:
if short_files_rule.search(file) != None:
class_name = re.sub('_', ' ', re.sub('\\.txt', '', file))
current_file = open(os.path.join(path, file), 'r', encoding='utf-8'
).read()
affixes = current_file.split(', ')
rules_dictionary[class_name] = affixes
return rules_dictionary
def find_affixes(rules_noun, lemma, word_possible_stress):
for stress_type, affixes in rules_noun.items():
for affix in affixes:
affix_type = ''
if re.search('^[а-яё]+\\-$', affix) != None:
regexp = '^' + affix[:-1]
affix_type = 'preffix'
elif re.search('^\\-[а-яё]+$', affix) != None:
regexp = affix[1:] + '$'
affix_type = 'suffix'
elif re.search('^[а-яё]+\\-\\.\\.\\.\\-[а-яё]+$', affix) != None:
regexp = '^' + re.sub('\\-\\.\\.\\.\\-', '.+', affix) + '$'
affix_type = 'combination'
if re.search(regexp, lemma) != None:
if stress_type in word_possible_stress:
word_possible_stress[stress_type].append((affix,
affix_type))
else:
word_possible_stress[stress_type] = [(affix, affix_type)]
return word_possible_stress
<|reserved_special_token_0|>
def find_possible_types(word_possible_stress, biggest_suffix, biggest_prefix):
possible_types = []
for stress_type, affixes in word_possible_stress.items():
for affix in affixes:
if affix[1] == 'suffix':
if affix[0] == biggest_suffix:
possible_types.append(stress_type)
elif affix[1] == 'prefix':
if affix[0] == biggest_prefix:
possible_types.append(stress_type)
elif affix[1] == 'combination':
possible_types = []
pair = affix[0].split('...')
if pair[0] == biggest_prefix and pair[1] == biggest_suffix:
possible_types.append(stress_type)
return possible_types
def make_stressed_word(possible_types, token, lemma, biggest_suffix,
original_token):
if possible_types[0] == 'prefix' or possible_types[0] == 'first vowel':
stressed_word = re.sub(
'^([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', "\\g<1>'",
token)
elif possible_types[0] == 'suffix' or possible_types[0] == 'suffix 1':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)
for num in range(1, 5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +
'$', '', stem)
stressed_word = re.sub('^(' + stem_cutted +
'[^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', "\\g<1>'", token
)
elif possible_types[0] == 'suffix 2':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)
for num in range(1, 5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +
'$', '', stem)
stressed_word = re.sub('^(' + stem_cutted +
'([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){2})',
"\\g<1>'", token)
elif possible_types[0] == 'suffix 3':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)
for num in range(1, 5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +
'$', '', stem)
stressed_word = re.sub('^(' + stem_cutted +
'([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){3})',
"\\g<1>'", token)
elif possible_types[0] == 'presuffix':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)
for num in range(1, 5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +
'$', '', stem)
suffixes = re.sub(stem_cutted, '', stem)
stressed_word = re.sub(
'([уеыаоэяиюёУЕЫАОЭЯИЮЁ])([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*' + suffixes +
'.{,5})$', "\\g<1>'\\g<2>", token)
elif possible_types[0] == 'type B':
stressed_word = re.sub('^(.+[уеыаоэяиюё])([^уеыаоэяиюё]*)$',
"\\g<1>'\\g<2>", token)
try:
parts = stressed_word.split("'")
stressed_word = original_token[:len(parts[0])] + "'" + original_token[
len(parts[0]):]
except:
stressed_word = original_token
return stressed_word
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def make_rules(folder):
rules_dictionary = {}
try:
path = os.path.join(os.getcwd(), 'rules', 'data', folder)
files = os.listdir(path)
except:
path = os.path.join(os.getcwd(), 'data', folder)
files = os.listdir(path)
short_files_rule = re.compile('.txt')
for file in files:
if short_files_rule.search(file) != None:
class_name = re.sub('_', ' ', re.sub('\\.txt', '', file))
current_file = open(os.path.join(path, file), 'r', encoding='utf-8'
).read()
affixes = current_file.split(', ')
rules_dictionary[class_name] = affixes
return rules_dictionary
def find_affixes(rules_noun, lemma, word_possible_stress):
for stress_type, affixes in rules_noun.items():
for affix in affixes:
affix_type = ''
if re.search('^[а-яё]+\\-$', affix) != None:
regexp = '^' + affix[:-1]
affix_type = 'preffix'
elif re.search('^\\-[а-яё]+$', affix) != None:
regexp = affix[1:] + '$'
affix_type = 'suffix'
elif re.search('^[а-яё]+\\-\\.\\.\\.\\-[а-яё]+$', affix) != None:
regexp = '^' + re.sub('\\-\\.\\.\\.\\-', '.+', affix) + '$'
affix_type = 'combination'
if re.search(regexp, lemma) != None:
if stress_type in word_possible_stress:
word_possible_stress[stress_type].append((affix,
affix_type))
else:
word_possible_stress[stress_type] = [(affix, affix_type)]
return word_possible_stress
<|reserved_special_token_0|>
def find_possible_types(word_possible_stress, biggest_suffix, biggest_prefix):
possible_types = []
for stress_type, affixes in word_possible_stress.items():
for affix in affixes:
if affix[1] == 'suffix':
if affix[0] == biggest_suffix:
possible_types.append(stress_type)
elif affix[1] == 'prefix':
if affix[0] == biggest_prefix:
possible_types.append(stress_type)
elif affix[1] == 'combination':
possible_types = []
pair = affix[0].split('...')
if pair[0] == biggest_prefix and pair[1] == biggest_suffix:
possible_types.append(stress_type)
return possible_types
def make_stressed_word(possible_types, token, lemma, biggest_suffix,
original_token):
if possible_types[0] == 'prefix' or possible_types[0] == 'first vowel':
stressed_word = re.sub(
'^([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', "\\g<1>'",
token)
elif possible_types[0] == 'suffix' or possible_types[0] == 'suffix 1':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)
for num in range(1, 5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +
'$', '', stem)
stressed_word = re.sub('^(' + stem_cutted +
'[^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', "\\g<1>'", token
)
elif possible_types[0] == 'suffix 2':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)
for num in range(1, 5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +
'$', '', stem)
stressed_word = re.sub('^(' + stem_cutted +
'([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){2})',
"\\g<1>'", token)
elif possible_types[0] == 'suffix 3':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)
for num in range(1, 5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +
'$', '', stem)
stressed_word = re.sub('^(' + stem_cutted +
'([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){3})',
"\\g<1>'", token)
elif possible_types[0] == 'presuffix':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)
for num in range(1, 5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +
'$', '', stem)
suffixes = re.sub(stem_cutted, '', stem)
stressed_word = re.sub(
'([уеыаоэяиюёУЕЫАОЭЯИЮЁ])([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*' + suffixes +
'.{,5})$', "\\g<1>'\\g<2>", token)
elif possible_types[0] == 'type B':
stressed_word = re.sub('^(.+[уеыаоэяиюё])([^уеыаоэяиюё]*)$',
"\\g<1>'\\g<2>", token)
try:
parts = stressed_word.split("'")
stressed_word = original_token[:len(parts[0])] + "'" + original_token[
len(parts[0]):]
except:
stressed_word = original_token
return stressed_word
def process_stresses(part_of_speech, rules, pos, lemma, token,
original_token, word_possible_stress, current_file):
stressed_word, biggest_suffix, possible_types = '', '', ['']
if part_of_speech in pos:
word_possible_stress = find_affixes(rules, lemma, word_possible_stress)
if word_possible_stress != {} and list(word_possible_stress.keys()
) != ['all prefixes', 'all suffixes'] and list(word_possible_stress
.keys()) != ['all suffixes'] and list(word_possible_stress.keys()
) != ['all prefixes']:
biggest_prefix, biggest_suffix, word_possible_stress = (
find_biggest_affixes(word_possible_stress))
possible_types = find_possible_types(word_possible_stress,
biggest_suffix, biggest_prefix)
if len(possible_types) == 1:
stressed_word = make_stressed_word(possible_types, token,
lemma, biggest_suffix, original_token)
current_file = re.sub(original_token, stressed_word,
current_file)
if possible_types == []:
possible_types = ['']
return current_file, stressed_word, biggest_suffix, possible_types[0]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def make_rules(folder):
rules_dictionary = {}
try:
path = os.path.join(os.getcwd(), 'rules', 'data', folder)
files = os.listdir(path)
except:
path = os.path.join(os.getcwd(), 'data', folder)
files = os.listdir(path)
short_files_rule = re.compile('.txt')
for file in files:
if short_files_rule.search(file) != None:
class_name = re.sub('_', ' ', re.sub('\\.txt', '', file))
current_file = open(os.path.join(path, file), 'r', encoding='utf-8'
).read()
affixes = current_file.split(', ')
rules_dictionary[class_name] = affixes
return rules_dictionary
def find_affixes(rules_noun, lemma, word_possible_stress):
for stress_type, affixes in rules_noun.items():
for affix in affixes:
affix_type = ''
if re.search('^[а-яё]+\\-$', affix) != None:
regexp = '^' + affix[:-1]
affix_type = 'preffix'
elif re.search('^\\-[а-яё]+$', affix) != None:
regexp = affix[1:] + '$'
affix_type = 'suffix'
elif re.search('^[а-яё]+\\-\\.\\.\\.\\-[а-яё]+$', affix) != None:
regexp = '^' + re.sub('\\-\\.\\.\\.\\-', '.+', affix) + '$'
affix_type = 'combination'
if re.search(regexp, lemma) != None:
if stress_type in word_possible_stress:
word_possible_stress[stress_type].append((affix,
affix_type))
else:
word_possible_stress[stress_type] = [(affix, affix_type)]
return word_possible_stress
def find_biggest_affixes(word_possible_stress):
biggest_len_suffix, biggest_len_prefix = 0, 0
biggest_suffix, biggest_prefix = '', ''
if 'all suffixes' in word_possible_stress:
for suffix in word_possible_stress['all suffixes']:
if len(suffix[0]) > biggest_len_suffix:
biggest_suffix = suffix[0]
biggest_len_suffix = len(suffix[0])
del word_possible_stress['all suffixes']
if 'all prefixes' in word_possible_stress:
for prefix in word_possible_stress['all prefixes']:
if len(prefix[0]) > biggest_len_prefix:
biggest_prefix = prefix[0]
biggest_len_prefix = len(prefix[0])
del word_possible_stress['all prefixes']
return biggest_prefix, biggest_suffix, word_possible_stress
def find_possible_types(word_possible_stress, biggest_suffix, biggest_prefix):
possible_types = []
for stress_type, affixes in word_possible_stress.items():
for affix in affixes:
if affix[1] == 'suffix':
if affix[0] == biggest_suffix:
possible_types.append(stress_type)
elif affix[1] == 'prefix':
if affix[0] == biggest_prefix:
possible_types.append(stress_type)
elif affix[1] == 'combination':
possible_types = []
pair = affix[0].split('...')
if pair[0] == biggest_prefix and pair[1] == biggest_suffix:
possible_types.append(stress_type)
return possible_types
def make_stressed_word(possible_types, token, lemma, biggest_suffix,
original_token):
if possible_types[0] == 'prefix' or possible_types[0] == 'first vowel':
stressed_word = re.sub(
'^([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', "\\g<1>'",
token)
elif possible_types[0] == 'suffix' or possible_types[0] == 'suffix 1':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)
for num in range(1, 5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +
'$', '', stem)
stressed_word = re.sub('^(' + stem_cutted +
'[^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', "\\g<1>'", token
)
elif possible_types[0] == 'suffix 2':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)
for num in range(1, 5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +
'$', '', stem)
stressed_word = re.sub('^(' + stem_cutted +
'([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){2})',
"\\g<1>'", token)
elif possible_types[0] == 'suffix 3':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)
for num in range(1, 5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +
'$', '', stem)
stressed_word = re.sub('^(' + stem_cutted +
'([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){3})',
"\\g<1>'", token)
elif possible_types[0] == 'presuffix':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)
for num in range(1, 5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +
'$', '', stem)
suffixes = re.sub(stem_cutted, '', stem)
stressed_word = re.sub(
'([уеыаоэяиюёУЕЫАОЭЯИЮЁ])([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*' + suffixes +
'.{,5})$', "\\g<1>'\\g<2>", token)
elif possible_types[0] == 'type B':
stressed_word = re.sub('^(.+[уеыаоэяиюё])([^уеыаоэяиюё]*)$',
"\\g<1>'\\g<2>", token)
try:
parts = stressed_word.split("'")
stressed_word = original_token[:len(parts[0])] + "'" + original_token[
len(parts[0]):]
except:
stressed_word = original_token
return stressed_word
def process_stresses(part_of_speech, rules, pos, lemma, token,
original_token, word_possible_stress, current_file):
stressed_word, biggest_suffix, possible_types = '', '', ['']
if part_of_speech in pos:
word_possible_stress = find_affixes(rules, lemma, word_possible_stress)
if word_possible_stress != {} and list(word_possible_stress.keys()
) != ['all prefixes', 'all suffixes'] and list(word_possible_stress
.keys()) != ['all suffixes'] and list(word_possible_stress.keys()
) != ['all prefixes']:
biggest_prefix, biggest_suffix, word_possible_stress = (
find_biggest_affixes(word_possible_stress))
possible_types = find_possible_types(word_possible_stress,
biggest_suffix, biggest_prefix)
if len(possible_types) == 1:
stressed_word = make_stressed_word(possible_types, token,
lemma, biggest_suffix, original_token)
current_file = re.sub(original_token, stressed_word,
current_file)
if possible_types == []:
possible_types = ['']
return current_file, stressed_word, biggest_suffix, possible_types[0]
def initialize(current_file):
morph = pymorphy2.MorphAnalyzer()
rules_noun = make_rules('NOUN')
rules_adj = make_rules('ADJ')
rules_verb = make_rules('VERB')
all_tokens = nltk.word_tokenize(current_file)
stressed_words, biggest_suffixes, stress_types, poses = [], [], [], []
for token in all_tokens:
stressed_word, biggest_suffix, stress_type = token, '', ''
original_token = token
token = token.lower()
word_possible_stress = {}
if re.search('^[А-ЯЁа-яё\\-]+$', token) != None and token != '-':
token = re.sub('^-', '', token)
pos = morph.parse(token)[0].tag.POS
lemma = morph.parse(token)[0].normal_form
if pos != None:
(current_file, stressed_word, biggest_suffix, stress_type) = (
process_stresses('NOUN', rules_noun, pos, lemma, token,
original_token, word_possible_stress, current_file))
if biggest_suffix == '':
(current_file, stressed_word, biggest_suffix, stress_type
) = (process_stresses('ADJF', rules_adj, pos, lemma,
token, original_token, word_possible_stress,
current_file))
if biggest_suffix == '':
(current_file, stressed_word, biggest_suffix,
stress_type) = (process_stresses('VERB',
rules_verb, pos, lemma, token, original_token,
word_possible_stress, current_file))
if stressed_word == '':
stressed_word = original_token
stressed_words.append(stressed_word)
biggest_suffixes.append(biggest_suffix)
stress_types.append(stress_type)
poses.append(pos)
return current_file, stressed_words, biggest_suffixes, stress_types, poses
<|reserved_special_token_1|>
import re, os, nltk, pymorphy2, sys
from suffix_trees.STree import STree
def make_rules(folder):
rules_dictionary = {}
try:
path = os.path.join(os.getcwd(), 'rules', 'data', folder)
files = os.listdir(path)
except:
path = os.path.join(os.getcwd(), 'data', folder)
files = os.listdir(path)
short_files_rule = re.compile('.txt')
for file in files:
if short_files_rule.search(file) != None:
class_name = re.sub('_', ' ', re.sub('\\.txt', '', file))
current_file = open(os.path.join(path, file), 'r', encoding='utf-8'
).read()
affixes = current_file.split(', ')
rules_dictionary[class_name] = affixes
return rules_dictionary
def find_affixes(rules_noun, lemma, word_possible_stress):
for stress_type, affixes in rules_noun.items():
for affix in affixes:
affix_type = ''
if re.search('^[а-яё]+\\-$', affix) != None:
regexp = '^' + affix[:-1]
affix_type = 'preffix'
elif re.search('^\\-[а-яё]+$', affix) != None:
regexp = affix[1:] + '$'
affix_type = 'suffix'
elif re.search('^[а-яё]+\\-\\.\\.\\.\\-[а-яё]+$', affix) != None:
regexp = '^' + re.sub('\\-\\.\\.\\.\\-', '.+', affix) + '$'
affix_type = 'combination'
if re.search(regexp, lemma) != None:
if stress_type in word_possible_stress:
word_possible_stress[stress_type].append((affix,
affix_type))
else:
word_possible_stress[stress_type] = [(affix, affix_type)]
return word_possible_stress
def find_biggest_affixes(word_possible_stress):
biggest_len_suffix, biggest_len_prefix = 0, 0
biggest_suffix, biggest_prefix = '', ''
if 'all suffixes' in word_possible_stress:
for suffix in word_possible_stress['all suffixes']:
if len(suffix[0]) > biggest_len_suffix:
biggest_suffix = suffix[0]
biggest_len_suffix = len(suffix[0])
del word_possible_stress['all suffixes']
if 'all prefixes' in word_possible_stress:
for prefix in word_possible_stress['all prefixes']:
if len(prefix[0]) > biggest_len_prefix:
biggest_prefix = prefix[0]
biggest_len_prefix = len(prefix[0])
del word_possible_stress['all prefixes']
return biggest_prefix, biggest_suffix, word_possible_stress
def find_possible_types(word_possible_stress, biggest_suffix, biggest_prefix):
possible_types = []
for stress_type, affixes in word_possible_stress.items():
for affix in affixes:
if affix[1] == 'suffix':
if affix[0] == biggest_suffix:
possible_types.append(stress_type)
elif affix[1] == 'prefix':
if affix[0] == biggest_prefix:
possible_types.append(stress_type)
elif affix[1] == 'combination':
possible_types = []
pair = affix[0].split('...')
if pair[0] == biggest_prefix and pair[1] == biggest_suffix:
possible_types.append(stress_type)
return possible_types
def make_stressed_word(possible_types, token, lemma, biggest_suffix,
original_token):
if possible_types[0] == 'prefix' or possible_types[0] == 'first vowel':
stressed_word = re.sub(
'^([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', "\\g<1>'",
token)
elif possible_types[0] == 'suffix' or possible_types[0] == 'suffix 1':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)
for num in range(1, 5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +
'$', '', stem)
stressed_word = re.sub('^(' + stem_cutted +
'[^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', "\\g<1>'", token
)
elif possible_types[0] == 'suffix 2':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)
for num in range(1, 5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +
'$', '', stem)
stressed_word = re.sub('^(' + stem_cutted +
'([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){2})',
"\\g<1>'", token)
elif possible_types[0] == 'suffix 3':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)
for num in range(1, 5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +
'$', '', stem)
stressed_word = re.sub('^(' + stem_cutted +
'([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){3})',
"\\g<1>'", token)
elif possible_types[0] == 'presuffix':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)
for num in range(1, 5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +
'$', '', stem)
suffixes = re.sub(stem_cutted, '', stem)
stressed_word = re.sub(
'([уеыаоэяиюёУЕЫАОЭЯИЮЁ])([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*' + suffixes +
'.{,5})$', "\\g<1>'\\g<2>", token)
elif possible_types[0] == 'type B':
stressed_word = re.sub('^(.+[уеыаоэяиюё])([^уеыаоэяиюё]*)$',
"\\g<1>'\\g<2>", token)
try:
parts = stressed_word.split("'")
stressed_word = original_token[:len(parts[0])] + "'" + original_token[
len(parts[0]):]
except:
stressed_word = original_token
return stressed_word
def process_stresses(part_of_speech, rules, pos, lemma, token,
original_token, word_possible_stress, current_file):
stressed_word, biggest_suffix, possible_types = '', '', ['']
if part_of_speech in pos:
word_possible_stress = find_affixes(rules, lemma, word_possible_stress)
if word_possible_stress != {} and list(word_possible_stress.keys()
) != ['all prefixes', 'all suffixes'] and list(word_possible_stress
.keys()) != ['all suffixes'] and list(word_possible_stress.keys()
) != ['all prefixes']:
biggest_prefix, biggest_suffix, word_possible_stress = (
find_biggest_affixes(word_possible_stress))
possible_types = find_possible_types(word_possible_stress,
biggest_suffix, biggest_prefix)
if len(possible_types) == 1:
stressed_word = make_stressed_word(possible_types, token,
lemma, biggest_suffix, original_token)
current_file = re.sub(original_token, stressed_word,
current_file)
if possible_types == []:
possible_types = ['']
return current_file, stressed_word, biggest_suffix, possible_types[0]
def initialize(current_file):
morph = pymorphy2.MorphAnalyzer()
rules_noun = make_rules('NOUN')
rules_adj = make_rules('ADJ')
rules_verb = make_rules('VERB')
all_tokens = nltk.word_tokenize(current_file)
stressed_words, biggest_suffixes, stress_types, poses = [], [], [], []
for token in all_tokens:
stressed_word, biggest_suffix, stress_type = token, '', ''
original_token = token
token = token.lower()
word_possible_stress = {}
if re.search('^[А-ЯЁа-яё\\-]+$', token) != None and token != '-':
token = re.sub('^-', '', token)
pos = morph.parse(token)[0].tag.POS
lemma = morph.parse(token)[0].normal_form
if pos != None:
(current_file, stressed_word, biggest_suffix, stress_type) = (
process_stresses('NOUN', rules_noun, pos, lemma, token,
original_token, word_possible_stress, current_file))
if biggest_suffix == '':
(current_file, stressed_word, biggest_suffix, stress_type
) = (process_stresses('ADJF', rules_adj, pos, lemma,
token, original_token, word_possible_stress,
current_file))
if biggest_suffix == '':
(current_file, stressed_word, biggest_suffix,
stress_type) = (process_stresses('VERB',
rules_verb, pos, lemma, token, original_token,
word_possible_stress, current_file))
if stressed_word == '':
stressed_word = original_token
stressed_words.append(stressed_word)
biggest_suffixes.append(biggest_suffix)
stress_types.append(stress_type)
poses.append(pos)
return current_file, stressed_words, biggest_suffixes, stress_types, poses
<|reserved_special_token_1|>
import re, os, nltk, pymorphy2, sys
from suffix_trees.STree import STree
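# Rule-based stress placement for Russian: affix lists are read from text files
# under (rules/)data/<POS>/, matched against each token's lemma to choose a
# stress class, and an apostrophe is inserted after the stressed vowel, then
# mapped back onto the original (case-preserving) token.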
def make_rules(folder):
rules_dictionary = {}
try:
path = os.path.join(os.getcwd(), 'rules', 'data', folder)
files = os.listdir(path)
except:
path = os.path.join(os.getcwd(), 'data', folder)
files = os.listdir(path)
short_files_rule = re.compile('.txt')
for file in files:
if short_files_rule.search(file) != None:
class_name = re.sub('_', ' ', re.sub('\.txt', '', file))
current_file = open(os.path.join(path, file), 'r', encoding='utf-8').read()
affixes = current_file.split(', ')
rules_dictionary[class_name] = affixes
return(rules_dictionary)
def find_affixes(rules_noun, lemma, word_possible_stress):
for stress_type, affixes in rules_noun.items():
for affix in affixes:
affix_type = ''
if re.search('^[а-яё]+\-$', affix) != None:
regexp = '^'+affix[:-1]
affix_type = 'preffix'
elif re.search('^\-[а-яё]+$', affix) != None:
regexp = affix[1:]+'$'
affix_type = 'suffix'
elif re.search('^[а-яё]+\-\.\.\.\-[а-яё]+$', affix) != None:
regexp = '^'+re.sub('\-\.\.\.\-', '.+', affix)+'$'
affix_type = 'combination'
if re.search(regexp, lemma) != None:
if stress_type in word_possible_stress:
word_possible_stress[stress_type].append((affix, affix_type))
else:
word_possible_stress[stress_type] = [(affix, affix_type)]
return(word_possible_stress)
def find_biggest_affixes(word_possible_stress):
biggest_len_suffix, biggest_len_prefix = 0, 0
biggest_suffix, biggest_prefix = '', ''
if 'all suffixes' in word_possible_stress:
for suffix in word_possible_stress['all suffixes']:
if len(suffix[0]) > biggest_len_suffix:
biggest_suffix = suffix[0]
biggest_len_suffix = len(suffix[0])
del word_possible_stress['all suffixes']
if 'all prefixes' in word_possible_stress:
for prefix in word_possible_stress['all prefixes']:
if len(prefix[0]) > biggest_len_prefix:
biggest_prefix = prefix[0]
biggest_len_prefix = len(prefix[0])
del word_possible_stress['all prefixes']
return(biggest_prefix, biggest_suffix, word_possible_stress)
def find_possible_types(word_possible_stress, biggest_suffix, biggest_prefix):
possible_types = []
for stress_type, affixes in word_possible_stress.items():
for affix in affixes:
if affix[1] == 'suffix':
if affix[0] == biggest_suffix:
possible_types.append(stress_type)
elif affix[1] == 'prefix':
if affix[0] == biggest_prefix:
possible_types.append(stress_type)
elif affix[1] == 'combination':
possible_types = []
pair = affix[0].split('...')
if pair[0] == biggest_prefix and pair[1] == biggest_suffix:
possible_types.append(stress_type)
return(possible_types)
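# Stress classes handled below: 'prefix' / 'first vowel' mark the first vowel of
# the token; 'suffix N' marks the N-th vowel after the stem (longest common
# substring of token and lemma, with the matched suffix stripped); 'presuffix'
# marks the vowel just before that suffix; 'type B' marks the last vowel.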
def make_stressed_word(possible_types, token, lemma, biggest_suffix, original_token):
if possible_types[0] == 'prefix' or possible_types[0] == 'first vowel':
stressed_word = re.sub('^([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', '\g<1>\'', token)
#print(token, stressed_word, lemma, biggest_prefix, biggest_suffix)
elif possible_types[0] == 'suffix' or possible_types[0] == 'suffix 1':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)+'$', '', stem)
for num in range(1,5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num]+'$', '', stem)
stressed_word = re.sub('^('+stem_cutted+'[^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', '\g<1>\'', token)
elif possible_types[0] == 'suffix 2':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)+'$', '', stem)
for num in range(1,5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num]+'$', '', stem)
stressed_word = re.sub('^('+stem_cutted+'([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){2})', '\g<1>\'', token)
elif possible_types[0] == 'suffix 3':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)+'$', '', stem)
for num in range(1,5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num]+'$', '', stem)
stressed_word = re.sub('^('+stem_cutted+'([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){3})', '\g<1>\'', token)
elif possible_types[0] == 'presuffix':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)+'$', '', stem)
for num in range(1,5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num]+'$', '', stem)
suffixes = re.sub(stem_cutted, '', stem)
stressed_word = re.sub('([уеыаоэяиюёУЕЫАОЭЯИЮЁ])([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*'+suffixes+'.{,5})$', '\g<1>\'\g<2>', token)
elif possible_types[0] == 'type B':
stressed_word = re.sub('^(.+[уеыаоэяиюё])([^уеыаоэяиюё]*)$', '\g<1>\'\g<2>', token)
try:
parts = stressed_word.split('\'')
stressed_word = original_token[:len(parts[0])]+'\''+original_token[len(parts[0]):]
except:
stressed_word = original_token
return(stressed_word)
def process_stresses(part_of_speech, rules, pos, lemma, token, original_token, word_possible_stress, current_file):
stressed_word, biggest_suffix, possible_types = '', '', ['']
if part_of_speech in pos:
word_possible_stress = find_affixes(rules, lemma, word_possible_stress)
if word_possible_stress != {} and list(word_possible_stress.keys()) != ['all prefixes', 'all suffixes'] and \
list(word_possible_stress.keys()) != ['all suffixes'] and list(word_possible_stress.keys()) != ['all prefixes']:
biggest_prefix, biggest_suffix, word_possible_stress = find_biggest_affixes(word_possible_stress)
possible_types = find_possible_types(word_possible_stress, biggest_suffix, biggest_prefix)
if len(possible_types) == 1:
stressed_word = make_stressed_word(possible_types, token, lemma, biggest_suffix, original_token)
current_file = re.sub(original_token, stressed_word, current_file)
## if pos == 'VERB':
## print(pos, lemma, token, stressed_word, biggest_suffix, possible_types[0])
if possible_types == []: possible_types = ['']
return(current_file, stressed_word, biggest_suffix, possible_types[0])
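# Tokenize the text with nltk, get POS and lemma from pymorphy2, and try the
# NOUN, then ADJF, then VERB rule sets while no suffix match has been found.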
def initialize(current_file):
morph = pymorphy2.MorphAnalyzer()
rules_noun = make_rules('NOUN')
rules_adj = make_rules('ADJ')
rules_verb = make_rules('VERB')
all_tokens = nltk.word_tokenize(current_file)
stressed_words, biggest_suffixes, stress_types, poses = [], [], [], []
for token in all_tokens:
stressed_word, biggest_suffix, stress_type = token, '', ''
original_token = token
token = token.lower()
word_possible_stress = {}
if re.search('^[А-ЯЁа-яё\-]+$', token) != None and token != '-':
token = re.sub('^-', '', token)
pos = morph.parse(token)[0].tag.POS
#pos = nltk.pos_tag(token, lang='rus')
lemma = morph.parse(token)[0].normal_form
if pos != None:
current_file, stressed_word, biggest_suffix, stress_type = process_stresses('NOUN', rules_noun, pos, lemma, token, original_token, word_possible_stress, current_file)
if biggest_suffix == '':
current_file,stressed_word, biggest_suffix, stress_type = process_stresses('ADJF', rules_adj, pos, lemma, token, original_token, word_possible_stress, current_file)
if biggest_suffix == '':
current_file, stressed_word, biggest_suffix, stress_type = process_stresses('VERB', rules_verb, pos, lemma, token, original_token, word_possible_stress, current_file)
if stressed_word == '':
stressed_word = original_token
stressed_words.append(stressed_word)
biggest_suffixes.append(biggest_suffix)
stress_types.append(stress_type)
poses.append(pos)
return(current_file, stressed_words, biggest_suffixes, stress_types, poses)
| flexible |
{
"blob_id": "1bf9785135f6105301d02602e54cbbcbdd249144",
"index": 9283,
"step-1": "<mask token>\n\n\ndef make_rules(folder):\n rules_dictionary = {}\n try:\n path = os.path.join(os.getcwd(), 'rules', 'data', folder)\n files = os.listdir(path)\n except:\n path = os.path.join(os.getcwd(), 'data', folder)\n files = os.listdir(path)\n short_files_rule = re.compile('.txt')\n for file in files:\n if short_files_rule.search(file) != None:\n class_name = re.sub('_', ' ', re.sub('\\\\.txt', '', file))\n current_file = open(os.path.join(path, file), 'r', encoding='utf-8'\n ).read()\n affixes = current_file.split(', ')\n rules_dictionary[class_name] = affixes\n return rules_dictionary\n\n\ndef find_affixes(rules_noun, lemma, word_possible_stress):\n for stress_type, affixes in rules_noun.items():\n for affix in affixes:\n affix_type = ''\n if re.search('^[а-яё]+\\\\-$', affix) != None:\n regexp = '^' + affix[:-1]\n affix_type = 'preffix'\n elif re.search('^\\\\-[а-яё]+$', affix) != None:\n regexp = affix[1:] + '$'\n affix_type = 'suffix'\n elif re.search('^[а-яё]+\\\\-\\\\.\\\\.\\\\.\\\\-[а-яё]+$', affix) != None:\n regexp = '^' + re.sub('\\\\-\\\\.\\\\.\\\\.\\\\-', '.+', affix) + '$'\n affix_type = 'combination'\n if re.search(regexp, lemma) != None:\n if stress_type in word_possible_stress:\n word_possible_stress[stress_type].append((affix,\n affix_type))\n else:\n word_possible_stress[stress_type] = [(affix, affix_type)]\n return word_possible_stress\n\n\n<mask token>\n\n\ndef find_possible_types(word_possible_stress, biggest_suffix, biggest_prefix):\n possible_types = []\n for stress_type, affixes in word_possible_stress.items():\n for affix in affixes:\n if affix[1] == 'suffix':\n if affix[0] == biggest_suffix:\n possible_types.append(stress_type)\n elif affix[1] == 'prefix':\n if affix[0] == biggest_prefix:\n possible_types.append(stress_type)\n elif affix[1] == 'combination':\n possible_types = []\n pair = affix[0].split('...')\n if pair[0] == biggest_prefix and pair[1] == biggest_suffix:\n possible_types.append(stress_type)\n return possible_types\n\n\ndef make_stressed_word(possible_types, token, lemma, biggest_suffix,\n original_token):\n if possible_types[0] == 'prefix' or possible_types[0] == 'first vowel':\n stressed_word = re.sub(\n '^([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', \"\\\\g<1>'\",\n token)\n elif possible_types[0] == 'suffix' or possible_types[0] == 'suffix 1':\n stem = STree([token, lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)\n for num in range(1, 5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +\n '$', '', stem)\n stressed_word = re.sub('^(' + stem_cutted +\n '[^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', \"\\\\g<1>'\", token\n )\n elif possible_types[0] == 'suffix 2':\n stem = STree([token, lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)\n for num in range(1, 5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +\n '$', '', stem)\n stressed_word = re.sub('^(' + stem_cutted +\n '([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){2})',\n \"\\\\g<1>'\", token)\n elif possible_types[0] == 'suffix 3':\n stem = STree([token, lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)\n for num in range(1, 5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +\n '$', '', stem)\n stressed_word = re.sub('^(' + stem_cutted +\n '([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){3})',\n \"\\\\g<1>'\", token)\n elif 
possible_types[0] == 'presuffix':\n stem = STree([token, lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)\n for num in range(1, 5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +\n '$', '', stem)\n suffixes = re.sub(stem_cutted, '', stem)\n stressed_word = re.sub(\n '([уеыаоэяиюёУЕЫАОЭЯИЮЁ])([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*' + suffixes +\n '.{,5})$', \"\\\\g<1>'\\\\g<2>\", token)\n elif possible_types[0] == 'type B':\n stressed_word = re.sub('^(.+[уеыаоэяиюё])([^уеыаоэяиюё]*)$',\n \"\\\\g<1>'\\\\g<2>\", token)\n try:\n parts = stressed_word.split(\"'\")\n stressed_word = original_token[:len(parts[0])] + \"'\" + original_token[\n len(parts[0]):]\n except:\n stressed_word = original_token\n return stressed_word\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef make_rules(folder):\n rules_dictionary = {}\n try:\n path = os.path.join(os.getcwd(), 'rules', 'data', folder)\n files = os.listdir(path)\n except:\n path = os.path.join(os.getcwd(), 'data', folder)\n files = os.listdir(path)\n short_files_rule = re.compile('.txt')\n for file in files:\n if short_files_rule.search(file) != None:\n class_name = re.sub('_', ' ', re.sub('\\\\.txt', '', file))\n current_file = open(os.path.join(path, file), 'r', encoding='utf-8'\n ).read()\n affixes = current_file.split(', ')\n rules_dictionary[class_name] = affixes\n return rules_dictionary\n\n\ndef find_affixes(rules_noun, lemma, word_possible_stress):\n for stress_type, affixes in rules_noun.items():\n for affix in affixes:\n affix_type = ''\n if re.search('^[а-яё]+\\\\-$', affix) != None:\n regexp = '^' + affix[:-1]\n affix_type = 'preffix'\n elif re.search('^\\\\-[а-яё]+$', affix) != None:\n regexp = affix[1:] + '$'\n affix_type = 'suffix'\n elif re.search('^[а-яё]+\\\\-\\\\.\\\\.\\\\.\\\\-[а-яё]+$', affix) != None:\n regexp = '^' + re.sub('\\\\-\\\\.\\\\.\\\\.\\\\-', '.+', affix) + '$'\n affix_type = 'combination'\n if re.search(regexp, lemma) != None:\n if stress_type in word_possible_stress:\n word_possible_stress[stress_type].append((affix,\n affix_type))\n else:\n word_possible_stress[stress_type] = [(affix, affix_type)]\n return word_possible_stress\n\n\n<mask token>\n\n\ndef find_possible_types(word_possible_stress, biggest_suffix, biggest_prefix):\n possible_types = []\n for stress_type, affixes in word_possible_stress.items():\n for affix in affixes:\n if affix[1] == 'suffix':\n if affix[0] == biggest_suffix:\n possible_types.append(stress_type)\n elif affix[1] == 'prefix':\n if affix[0] == biggest_prefix:\n possible_types.append(stress_type)\n elif affix[1] == 'combination':\n possible_types = []\n pair = affix[0].split('...')\n if pair[0] == biggest_prefix and pair[1] == biggest_suffix:\n possible_types.append(stress_type)\n return possible_types\n\n\ndef make_stressed_word(possible_types, token, lemma, biggest_suffix,\n original_token):\n if possible_types[0] == 'prefix' or possible_types[0] == 'first vowel':\n stressed_word = re.sub(\n '^([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', \"\\\\g<1>'\",\n token)\n elif possible_types[0] == 'suffix' or possible_types[0] == 'suffix 1':\n stem = STree([token, lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)\n for num in range(1, 5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +\n '$', '', stem)\n stressed_word = re.sub('^(' + stem_cutted +\n '[^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', \"\\\\g<1>'\", token\n )\n elif possible_types[0] == 'suffix 2':\n stem = STree([token, lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)\n for num in range(1, 5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +\n '$', '', stem)\n stressed_word = re.sub('^(' + stem_cutted +\n '([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){2})',\n \"\\\\g<1>'\", token)\n elif possible_types[0] == 'suffix 3':\n stem = STree([token, lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)\n for num in range(1, 5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +\n '$', '', stem)\n stressed_word = re.sub('^(' + stem_cutted +\n '([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){3})',\n \"\\\\g<1>'\", token)\n elif 
possible_types[0] == 'presuffix':\n stem = STree([token, lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)\n for num in range(1, 5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +\n '$', '', stem)\n suffixes = re.sub(stem_cutted, '', stem)\n stressed_word = re.sub(\n '([уеыаоэяиюёУЕЫАОЭЯИЮЁ])([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*' + suffixes +\n '.{,5})$', \"\\\\g<1>'\\\\g<2>\", token)\n elif possible_types[0] == 'type B':\n stressed_word = re.sub('^(.+[уеыаоэяиюё])([^уеыаоэяиюё]*)$',\n \"\\\\g<1>'\\\\g<2>\", token)\n try:\n parts = stressed_word.split(\"'\")\n stressed_word = original_token[:len(parts[0])] + \"'\" + original_token[\n len(parts[0]):]\n except:\n stressed_word = original_token\n return stressed_word\n\n\ndef process_stresses(part_of_speech, rules, pos, lemma, token,\n original_token, word_possible_stress, current_file):\n stressed_word, biggest_suffix, possible_types = '', '', ['']\n if part_of_speech in pos:\n word_possible_stress = find_affixes(rules, lemma, word_possible_stress)\n if word_possible_stress != {} and list(word_possible_stress.keys()\n ) != ['all prefixes', 'all suffixes'] and list(word_possible_stress\n .keys()) != ['all suffixes'] and list(word_possible_stress.keys()\n ) != ['all prefixes']:\n biggest_prefix, biggest_suffix, word_possible_stress = (\n find_biggest_affixes(word_possible_stress))\n possible_types = find_possible_types(word_possible_stress,\n biggest_suffix, biggest_prefix)\n if len(possible_types) == 1:\n stressed_word = make_stressed_word(possible_types, token,\n lemma, biggest_suffix, original_token)\n current_file = re.sub(original_token, stressed_word,\n current_file)\n if possible_types == []:\n possible_types = ['']\n return current_file, stressed_word, biggest_suffix, possible_types[0]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef make_rules(folder):\n rules_dictionary = {}\n try:\n path = os.path.join(os.getcwd(), 'rules', 'data', folder)\n files = os.listdir(path)\n except:\n path = os.path.join(os.getcwd(), 'data', folder)\n files = os.listdir(path)\n short_files_rule = re.compile('.txt')\n for file in files:\n if short_files_rule.search(file) != None:\n class_name = re.sub('_', ' ', re.sub('\\\\.txt', '', file))\n current_file = open(os.path.join(path, file), 'r', encoding='utf-8'\n ).read()\n affixes = current_file.split(', ')\n rules_dictionary[class_name] = affixes\n return rules_dictionary\n\n\ndef find_affixes(rules_noun, lemma, word_possible_stress):\n for stress_type, affixes in rules_noun.items():\n for affix in affixes:\n affix_type = ''\n if re.search('^[а-яё]+\\\\-$', affix) != None:\n regexp = '^' + affix[:-1]\n affix_type = 'preffix'\n elif re.search('^\\\\-[а-яё]+$', affix) != None:\n regexp = affix[1:] + '$'\n affix_type = 'suffix'\n elif re.search('^[а-яё]+\\\\-\\\\.\\\\.\\\\.\\\\-[а-яё]+$', affix) != None:\n regexp = '^' + re.sub('\\\\-\\\\.\\\\.\\\\.\\\\-', '.+', affix) + '$'\n affix_type = 'combination'\n if re.search(regexp, lemma) != None:\n if stress_type in word_possible_stress:\n word_possible_stress[stress_type].append((affix,\n affix_type))\n else:\n word_possible_stress[stress_type] = [(affix, affix_type)]\n return word_possible_stress\n\n\ndef find_biggest_affixes(word_possible_stress):\n biggest_len_suffix, biggest_len_prefix = 0, 0\n biggest_suffix, biggest_prefix = '', ''\n if 'all suffixes' in word_possible_stress:\n for suffix in word_possible_stress['all suffixes']:\n if len(suffix[0]) > biggest_len_suffix:\n biggest_suffix = suffix[0]\n biggest_len_suffix = len(suffix[0])\n del word_possible_stress['all suffixes']\n if 'all prefixes' in word_possible_stress:\n for prefix in word_possible_stress['all prefixes']:\n if len(prefix[0]) > biggest_len_prefix:\n biggest_prefix = prefix[0]\n biggest_len_prefix = len(prefix[0])\n del word_possible_stress['all prefixes']\n return biggest_prefix, biggest_suffix, word_possible_stress\n\n\ndef find_possible_types(word_possible_stress, biggest_suffix, biggest_prefix):\n possible_types = []\n for stress_type, affixes in word_possible_stress.items():\n for affix in affixes:\n if affix[1] == 'suffix':\n if affix[0] == biggest_suffix:\n possible_types.append(stress_type)\n elif affix[1] == 'prefix':\n if affix[0] == biggest_prefix:\n possible_types.append(stress_type)\n elif affix[1] == 'combination':\n possible_types = []\n pair = affix[0].split('...')\n if pair[0] == biggest_prefix and pair[1] == biggest_suffix:\n possible_types.append(stress_type)\n return possible_types\n\n\ndef make_stressed_word(possible_types, token, lemma, biggest_suffix,\n original_token):\n if possible_types[0] == 'prefix' or possible_types[0] == 'first vowel':\n stressed_word = re.sub(\n '^([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', \"\\\\g<1>'\",\n token)\n elif possible_types[0] == 'suffix' or possible_types[0] == 'suffix 1':\n stem = STree([token, lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)\n for num in range(1, 5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +\n '$', '', stem)\n stressed_word = re.sub('^(' + stem_cutted +\n '[^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', \"\\\\g<1>'\", token\n )\n elif possible_types[0] == 'suffix 2':\n stem = STree([token, lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', 
stem)\n for num in range(1, 5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +\n '$', '', stem)\n stressed_word = re.sub('^(' + stem_cutted +\n '([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){2})',\n \"\\\\g<1>'\", token)\n elif possible_types[0] == 'suffix 3':\n stem = STree([token, lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)\n for num in range(1, 5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +\n '$', '', stem)\n stressed_word = re.sub('^(' + stem_cutted +\n '([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){3})',\n \"\\\\g<1>'\", token)\n elif possible_types[0] == 'presuffix':\n stem = STree([token, lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)\n for num in range(1, 5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +\n '$', '', stem)\n suffixes = re.sub(stem_cutted, '', stem)\n stressed_word = re.sub(\n '([уеыаоэяиюёУЕЫАОЭЯИЮЁ])([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*' + suffixes +\n '.{,5})$', \"\\\\g<1>'\\\\g<2>\", token)\n elif possible_types[0] == 'type B':\n stressed_word = re.sub('^(.+[уеыаоэяиюё])([^уеыаоэяиюё]*)$',\n \"\\\\g<1>'\\\\g<2>\", token)\n try:\n parts = stressed_word.split(\"'\")\n stressed_word = original_token[:len(parts[0])] + \"'\" + original_token[\n len(parts[0]):]\n except:\n stressed_word = original_token\n return stressed_word\n\n\ndef process_stresses(part_of_speech, rules, pos, lemma, token,\n original_token, word_possible_stress, current_file):\n stressed_word, biggest_suffix, possible_types = '', '', ['']\n if part_of_speech in pos:\n word_possible_stress = find_affixes(rules, lemma, word_possible_stress)\n if word_possible_stress != {} and list(word_possible_stress.keys()\n ) != ['all prefixes', 'all suffixes'] and list(word_possible_stress\n .keys()) != ['all suffixes'] and list(word_possible_stress.keys()\n ) != ['all prefixes']:\n biggest_prefix, biggest_suffix, word_possible_stress = (\n find_biggest_affixes(word_possible_stress))\n possible_types = find_possible_types(word_possible_stress,\n biggest_suffix, biggest_prefix)\n if len(possible_types) == 1:\n stressed_word = make_stressed_word(possible_types, token,\n lemma, biggest_suffix, original_token)\n current_file = re.sub(original_token, stressed_word,\n current_file)\n if possible_types == []:\n possible_types = ['']\n return current_file, stressed_word, biggest_suffix, possible_types[0]\n\n\ndef initialize(current_file):\n morph = pymorphy2.MorphAnalyzer()\n rules_noun = make_rules('NOUN')\n rules_adj = make_rules('ADJ')\n rules_verb = make_rules('VERB')\n all_tokens = nltk.word_tokenize(current_file)\n stressed_words, biggest_suffixes, stress_types, poses = [], [], [], []\n for token in all_tokens:\n stressed_word, biggest_suffix, stress_type = token, '', ''\n original_token = token\n token = token.lower()\n word_possible_stress = {}\n if re.search('^[А-ЯЁа-яё\\\\-]+$', token) != None and token != '-':\n token = re.sub('^-', '', token)\n pos = morph.parse(token)[0].tag.POS\n lemma = morph.parse(token)[0].normal_form\n if pos != None:\n (current_file, stressed_word, biggest_suffix, stress_type) = (\n process_stresses('NOUN', rules_noun, pos, lemma, token,\n original_token, word_possible_stress, current_file))\n if biggest_suffix == '':\n (current_file, stressed_word, biggest_suffix, stress_type\n ) = (process_stresses('ADJF', rules_adj, pos, lemma,\n token, original_token, word_possible_stress,\n 
current_file))\n if biggest_suffix == '':\n (current_file, stressed_word, biggest_suffix,\n stress_type) = (process_stresses('VERB',\n rules_verb, pos, lemma, token, original_token,\n word_possible_stress, current_file))\n if stressed_word == '':\n stressed_word = original_token\n stressed_words.append(stressed_word)\n biggest_suffixes.append(biggest_suffix)\n stress_types.append(stress_type)\n poses.append(pos)\n return current_file, stressed_words, biggest_suffixes, stress_types, poses\n",
"step-4": "import re, os, nltk, pymorphy2, sys\nfrom suffix_trees.STree import STree\n\n\ndef make_rules(folder):\n rules_dictionary = {}\n try:\n path = os.path.join(os.getcwd(), 'rules', 'data', folder)\n files = os.listdir(path)\n except:\n path = os.path.join(os.getcwd(), 'data', folder)\n files = os.listdir(path)\n short_files_rule = re.compile('.txt')\n for file in files:\n if short_files_rule.search(file) != None:\n class_name = re.sub('_', ' ', re.sub('\\\\.txt', '', file))\n current_file = open(os.path.join(path, file), 'r', encoding='utf-8'\n ).read()\n affixes = current_file.split(', ')\n rules_dictionary[class_name] = affixes\n return rules_dictionary\n\n\ndef find_affixes(rules_noun, lemma, word_possible_stress):\n for stress_type, affixes in rules_noun.items():\n for affix in affixes:\n affix_type = ''\n if re.search('^[а-яё]+\\\\-$', affix) != None:\n regexp = '^' + affix[:-1]\n affix_type = 'preffix'\n elif re.search('^\\\\-[а-яё]+$', affix) != None:\n regexp = affix[1:] + '$'\n affix_type = 'suffix'\n elif re.search('^[а-яё]+\\\\-\\\\.\\\\.\\\\.\\\\-[а-яё]+$', affix) != None:\n regexp = '^' + re.sub('\\\\-\\\\.\\\\.\\\\.\\\\-', '.+', affix) + '$'\n affix_type = 'combination'\n if re.search(regexp, lemma) != None:\n if stress_type in word_possible_stress:\n word_possible_stress[stress_type].append((affix,\n affix_type))\n else:\n word_possible_stress[stress_type] = [(affix, affix_type)]\n return word_possible_stress\n\n\ndef find_biggest_affixes(word_possible_stress):\n biggest_len_suffix, biggest_len_prefix = 0, 0\n biggest_suffix, biggest_prefix = '', ''\n if 'all suffixes' in word_possible_stress:\n for suffix in word_possible_stress['all suffixes']:\n if len(suffix[0]) > biggest_len_suffix:\n biggest_suffix = suffix[0]\n biggest_len_suffix = len(suffix[0])\n del word_possible_stress['all suffixes']\n if 'all prefixes' in word_possible_stress:\n for prefix in word_possible_stress['all prefixes']:\n if len(prefix[0]) > biggest_len_prefix:\n biggest_prefix = prefix[0]\n biggest_len_prefix = len(prefix[0])\n del word_possible_stress['all prefixes']\n return biggest_prefix, biggest_suffix, word_possible_stress\n\n\ndef find_possible_types(word_possible_stress, biggest_suffix, biggest_prefix):\n possible_types = []\n for stress_type, affixes in word_possible_stress.items():\n for affix in affixes:\n if affix[1] == 'suffix':\n if affix[0] == biggest_suffix:\n possible_types.append(stress_type)\n elif affix[1] == 'prefix':\n if affix[0] == biggest_prefix:\n possible_types.append(stress_type)\n elif affix[1] == 'combination':\n possible_types = []\n pair = affix[0].split('...')\n if pair[0] == biggest_prefix and pair[1] == biggest_suffix:\n possible_types.append(stress_type)\n return possible_types\n\n\ndef make_stressed_word(possible_types, token, lemma, biggest_suffix,\n original_token):\n if possible_types[0] == 'prefix' or possible_types[0] == 'first vowel':\n stressed_word = re.sub(\n '^([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', \"\\\\g<1>'\",\n token)\n elif possible_types[0] == 'suffix' or possible_types[0] == 'suffix 1':\n stem = STree([token, lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)\n for num in range(1, 5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +\n '$', '', stem)\n stressed_word = re.sub('^(' + stem_cutted +\n '[^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', \"\\\\g<1>'\", token\n )\n elif possible_types[0] == 'suffix 2':\n stem = STree([token, lemma]).lcs()\n 
stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)\n for num in range(1, 5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +\n '$', '', stem)\n stressed_word = re.sub('^(' + stem_cutted +\n '([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){2})',\n \"\\\\g<1>'\", token)\n elif possible_types[0] == 'suffix 3':\n stem = STree([token, lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)\n for num in range(1, 5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +\n '$', '', stem)\n stressed_word = re.sub('^(' + stem_cutted +\n '([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){3})',\n \"\\\\g<1>'\", token)\n elif possible_types[0] == 'presuffix':\n stem = STree([token, lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix) + '$', '', stem)\n for num in range(1, 5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num] +\n '$', '', stem)\n suffixes = re.sub(stem_cutted, '', stem)\n stressed_word = re.sub(\n '([уеыаоэяиюёУЕЫАОЭЯИЮЁ])([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*' + suffixes +\n '.{,5})$', \"\\\\g<1>'\\\\g<2>\", token)\n elif possible_types[0] == 'type B':\n stressed_word = re.sub('^(.+[уеыаоэяиюё])([^уеыаоэяиюё]*)$',\n \"\\\\g<1>'\\\\g<2>\", token)\n try:\n parts = stressed_word.split(\"'\")\n stressed_word = original_token[:len(parts[0])] + \"'\" + original_token[\n len(parts[0]):]\n except:\n stressed_word = original_token\n return stressed_word\n\n\ndef process_stresses(part_of_speech, rules, pos, lemma, token,\n original_token, word_possible_stress, current_file):\n stressed_word, biggest_suffix, possible_types = '', '', ['']\n if part_of_speech in pos:\n word_possible_stress = find_affixes(rules, lemma, word_possible_stress)\n if word_possible_stress != {} and list(word_possible_stress.keys()\n ) != ['all prefixes', 'all suffixes'] and list(word_possible_stress\n .keys()) != ['all suffixes'] and list(word_possible_stress.keys()\n ) != ['all prefixes']:\n biggest_prefix, biggest_suffix, word_possible_stress = (\n find_biggest_affixes(word_possible_stress))\n possible_types = find_possible_types(word_possible_stress,\n biggest_suffix, biggest_prefix)\n if len(possible_types) == 1:\n stressed_word = make_stressed_word(possible_types, token,\n lemma, biggest_suffix, original_token)\n current_file = re.sub(original_token, stressed_word,\n current_file)\n if possible_types == []:\n possible_types = ['']\n return current_file, stressed_word, biggest_suffix, possible_types[0]\n\n\ndef initialize(current_file):\n morph = pymorphy2.MorphAnalyzer()\n rules_noun = make_rules('NOUN')\n rules_adj = make_rules('ADJ')\n rules_verb = make_rules('VERB')\n all_tokens = nltk.word_tokenize(current_file)\n stressed_words, biggest_suffixes, stress_types, poses = [], [], [], []\n for token in all_tokens:\n stressed_word, biggest_suffix, stress_type = token, '', ''\n original_token = token\n token = token.lower()\n word_possible_stress = {}\n if re.search('^[А-ЯЁа-яё\\\\-]+$', token) != None and token != '-':\n token = re.sub('^-', '', token)\n pos = morph.parse(token)[0].tag.POS\n lemma = morph.parse(token)[0].normal_form\n if pos != None:\n (current_file, stressed_word, biggest_suffix, stress_type) = (\n process_stresses('NOUN', rules_noun, pos, lemma, token,\n original_token, word_possible_stress, current_file))\n if biggest_suffix == '':\n (current_file, stressed_word, biggest_suffix, stress_type\n ) = (process_stresses('ADJF', rules_adj, 
pos, lemma,\n token, original_token, word_possible_stress,\n current_file))\n if biggest_suffix == '':\n (current_file, stressed_word, biggest_suffix,\n stress_type) = (process_stresses('VERB',\n rules_verb, pos, lemma, token, original_token,\n word_possible_stress, current_file))\n if stressed_word == '':\n stressed_word = original_token\n stressed_words.append(stressed_word)\n biggest_suffixes.append(biggest_suffix)\n stress_types.append(stress_type)\n poses.append(pos)\n return current_file, stressed_words, biggest_suffixes, stress_types, poses\n",
"step-5": "import re, os, nltk, pymorphy2, sys\nfrom suffix_trees.STree import STree\n\n\ndef make_rules(folder):\n rules_dictionary = {}\n try:\n path = os.path.join(os.getcwd(), 'rules', 'data', folder)\n files = os.listdir(path)\n except:\n path = os.path.join(os.getcwd(), 'data', folder)\n files = os.listdir(path)\n short_files_rule = re.compile('.txt')\n for file in files:\n if short_files_rule.search(file) != None:\n class_name = re.sub('_', ' ', re.sub('\\.txt', '', file))\n current_file = open(os.path.join(path, file), 'r', encoding='utf-8').read()\n affixes = current_file.split(', ')\n rules_dictionary[class_name] = affixes\n return(rules_dictionary)\n\n\ndef find_affixes(rules_noun, lemma, word_possible_stress):\n for stress_type, affixes in rules_noun.items():\n for affix in affixes:\n affix_type = ''\n if re.search('^[а-яё]+\\-$', affix) != None:\n regexp = '^'+affix[:-1]\n affix_type = 'preffix'\n elif re.search('^\\-[а-яё]+$', affix) != None:\n regexp = affix[1:]+'$'\n affix_type = 'suffix'\n elif re.search('^[а-яё]+\\-\\.\\.\\.\\-[а-яё]+$', affix) != None:\n regexp = '^'+re.sub('\\-\\.\\.\\.\\-', '.+', affix)+'$'\n affix_type = 'combination'\n\n if re.search(regexp, lemma) != None:\n if stress_type in word_possible_stress:\n word_possible_stress[stress_type].append((affix, affix_type))\n else:\n word_possible_stress[stress_type] = [(affix, affix_type)]\n return(word_possible_stress)\n\n\ndef find_biggest_affixes(word_possible_stress):\n biggest_len_suffix, biggest_len_prefix = 0, 0\n biggest_suffix, biggest_prefix = '', ''\n if 'all suffixes' in word_possible_stress:\n for suffix in word_possible_stress['all suffixes']:\n if len(suffix[0]) > biggest_len_suffix:\n biggest_suffix = suffix[0]\n biggest_len_suffix = len(suffix[0])\n del word_possible_stress['all suffixes']\n \n if 'all prefixes' in word_possible_stress:\n for prefix in word_possible_stress['all prefixes']:\n if len(prefix[0]) > biggest_len_prefix:\n biggest_prefix = prefix[0]\n biggest_len_prefix = len(prefix[0])\n del word_possible_stress['all prefixes']\n return(biggest_prefix, biggest_suffix, word_possible_stress)\n\n\ndef find_possible_types(word_possible_stress, biggest_suffix, biggest_prefix):\n possible_types = []\n for stress_type, affixes in word_possible_stress.items():\n for affix in affixes:\n if affix[1] == 'suffix':\n if affix[0] == biggest_suffix:\n possible_types.append(stress_type)\n elif affix[1] == 'prefix':\n if affix[0] == biggest_prefix:\n possible_types.append(stress_type)\n elif affix[1] == 'combination':\n possible_types = []\n pair = affix[0].split('...')\n if pair[0] == biggest_prefix and pair[1] == biggest_suffix:\n possible_types.append(stress_type)\n return(possible_types)\n\n\ndef make_stressed_word(possible_types, token, lemma, biggest_suffix, original_token): \n if possible_types[0] == 'prefix' or possible_types[0] == 'first vowel':\n stressed_word = re.sub('^([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', '\\g<1>\\'', token)\n #print(token, stressed_word, lemma, biggest_prefix, biggest_suffix)\n elif possible_types[0] == 'suffix' or possible_types[0] == 'suffix 1':\n stem = STree([token, lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)+'$', '', stem)\n for num in range(1,5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num]+'$', '', stem)\n stressed_word = re.sub('^('+stem_cutted+'[^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', '\\g<1>\\'', token)\n elif possible_types[0] == 'suffix 2':\n stem = STree([token, 
lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)+'$', '', stem)\n for num in range(1,5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num]+'$', '', stem)\n stressed_word = re.sub('^('+stem_cutted+'([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){2})', '\\g<1>\\'', token)\n\n elif possible_types[0] == 'suffix 3':\n stem = STree([token, lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)+'$', '', stem)\n for num in range(1,5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num]+'$', '', stem)\n stressed_word = re.sub('^('+stem_cutted+'([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){3})', '\\g<1>\\'', token)\n \n elif possible_types[0] == 'presuffix':\n stem = STree([token, lemma]).lcs()\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)+'$', '', stem)\n for num in range(1,5):\n if stem == stem_cutted:\n stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num]+'$', '', stem)\n suffixes = re.sub(stem_cutted, '', stem)\n stressed_word = re.sub('([уеыаоэяиюёУЕЫАОЭЯИЮЁ])([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*'+suffixes+'.{,5})$', '\\g<1>\\'\\g<2>', token)\n elif possible_types[0] == 'type B':\n stressed_word = re.sub('^(.+[уеыаоэяиюё])([^уеыаоэяиюё]*)$', '\\g<1>\\'\\g<2>', token)\n try:\n parts = stressed_word.split('\\'')\n stressed_word = original_token[:len(parts[0])]+'\\''+original_token[len(parts[0]):]\n except:\n stressed_word = original_token\n return(stressed_word)\n\n\ndef process_stresses(part_of_speech, rules, pos, lemma, token, original_token, word_possible_stress, current_file):\n stressed_word, biggest_suffix, possible_types = '', '', ['']\n if part_of_speech in pos:\n word_possible_stress = find_affixes(rules, lemma, word_possible_stress)\n\n if word_possible_stress != {} and list(word_possible_stress.keys()) != ['all prefixes', 'all suffixes'] and \\\n list(word_possible_stress.keys()) != ['all suffixes'] and list(word_possible_stress.keys()) != ['all prefixes']:\n\n biggest_prefix, biggest_suffix, word_possible_stress = find_biggest_affixes(word_possible_stress)\n possible_types = find_possible_types(word_possible_stress, biggest_suffix, biggest_prefix)\n if len(possible_types) == 1:\n stressed_word = make_stressed_word(possible_types, token, lemma, biggest_suffix, original_token)\n current_file = re.sub(original_token, stressed_word, current_file)\n## if pos == 'VERB':\n## print(pos, lemma, token, stressed_word, biggest_suffix, possible_types[0])\n if possible_types == []: possible_types = ['']\n return(current_file, stressed_word, biggest_suffix, possible_types[0])\n\n\ndef initialize(current_file):\n morph = pymorphy2.MorphAnalyzer()\n rules_noun = make_rules('NOUN')\n rules_adj = make_rules('ADJ')\n rules_verb = make_rules('VERB')\n all_tokens = nltk.word_tokenize(current_file)\n stressed_words, biggest_suffixes, stress_types, poses = [], [], [], []\n for token in all_tokens:\n stressed_word, biggest_suffix, stress_type = token, '', ''\n original_token = token\n token = token.lower()\n word_possible_stress = {}\n if re.search('^[А-ЯЁа-яё\\-]+$', token) != None and token != '-':\n token = re.sub('^-', '', token)\n pos = morph.parse(token)[0].tag.POS\n #pos = nltk.pos_tag(token, lang='rus')\n lemma = morph.parse(token)[0].normal_form\n if pos != None:\n current_file, stressed_word, biggest_suffix, stress_type = process_stresses('NOUN', rules_noun, pos, lemma, token, original_token, word_possible_stress, current_file)\n if biggest_suffix == '':\n current_file,stressed_word, 
biggest_suffix, stress_type = process_stresses('ADJF', rules_adj, pos, lemma, token, original_token, word_possible_stress, current_file)\n if biggest_suffix == '':\n current_file, stressed_word, biggest_suffix, stress_type = process_stresses('VERB', rules_verb, pos, lemma, token, original_token, word_possible_stress, current_file)\n if stressed_word == '':\n stressed_word = original_token\n stressed_words.append(stressed_word)\n biggest_suffixes.append(biggest_suffix)\n stress_types.append(stress_type)\n poses.append(pos)\n return(current_file, stressed_words, biggest_suffixes, stress_types, poses)\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
"""
Unit Tests for endpoints.py
"""
import unittest
import os # pylint: disable=unused-import
from mock import patch, call
from github_approval_checker.utils import util # pylint: disable=unused-import
from github_approval_checker.utils.github_handler import GithubHandler # pylint: disable=unused-import
from github_approval_checker.utils.exceptions import ConfigError, APIError, SignatureError # noqa pylint: disable=unused-import
from github_approval_checker.api import endpoints # pylint: disable=unused-import
class EndpointsUnitTests(unittest.TestCase):
"""
Test endpoints.py
"""
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
@patch("github_approval_checker.utils.util.validate_config")
def test_post_pull_request_review(
self,
validate_config,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = {
"context1": [
"whitelist1"
],
"context2": [
"whitelist2"
]
}
handler.get_statuses.return_value = [
{
"state": "error",
"context": "context2",
"target_url": "fake://status_target_2",
"description": "Status Check 2"
},
{
"state": "pending",
"context": "context3",
"target_url": "fake://status_target_3",
"description": "Status Check 3"
},
{
"state": "failure",
"context": "context1",
"target_url": "fake://status_target_1",
"description": "Status Check 1"
}
]
handler.is_authorized.return_value = True
validate_config.return_value = None
data = {
"repository": {
"name": "repo-name",
"full_name": "repo-full-name",
"owner": {
"login": "repo-owner"
}
},
"review": {
"state": "approved",
"commit_id": "review-commit-id",
"user": {
"login": "review-user-login"
}
}
}
handler.post_status.side_effect = [
201,
400
]
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_called_once_with("repo-full-name", "review-commit-id")
self.assertEqual(handler.is_authorized.call_count, 2)
handler.post_status.assert_has_calls([
call(
"repo-full-name",
"review-commit-id",
"context2",
"fake://status_target_2",
"review-user-login",
"Status Check 2"
),
call(
"repo-full-name",
"review-commit-id",
"context1",
"fake://status_target_1",
"review-user-login",
"Status Check 1"
)
])
self.assertEqual(response, util.STATUS_OK)
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
@patch("github_approval_checker.utils.util.validate_config")
def test_post_pull_request_review_unapproved(
self,
validate_config,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review with a review where the status is not approved.
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = {
"context1": [
"whitelist1"
],
"context2": [
"whitelist2"
]
}
validate_config.return_value = None
data = {
"repository": {
"name": "repo-name",
"full_name": "repo-full-name",
"owner": {
"login": "repo-owner"
}
},
"review": {
"state": "changes-requested",
"commit_id": "review-commit-id",
"user": {
"login": "review-user-login"
}
}
}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
self.assertEqual(response, ({'status': 'OK', 'message': 'Review state is not approved'}, 200))
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
def test_post_pull_request_review_missing(
self,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review with a missing config file
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.side_effect = APIError("config-error", "{'message': 'bad-config'}")
data = {
"repository": {
"name": "repo-name",
"full_name": "repo-full-name",
"owner": {
"login": "repo-owner"
}
},
"review": {
"state": "changes-requested",
"commit_id": "review-commit-id",
"user": {
"login": "review-user-login"
}
}
}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
self.assertEqual(response, "{'message': 'bad-config'}")
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
@patch("github_approval_checker.utils.util.validate_config")
def test_post_pull_request_review_bad_config(
self,
validate_config,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review with a bad config file
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = "config-data"
validate_config.side_effect = ConfigError(
'Config Validation Error',
({'status': 'Config Validation Error', 'message': 'Bad config data'}, 500)
)
data = {
"repository": {
"name": "repo-name",
"full_name": "repo-full-name",
"owner": {
"login": "repo-owner"
}
},
"review": {
"state": "changes-requested",
"commit_id": "review-commit-id",
"user": {
"login": "review-user-login"
}
}
}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
handler.get_config.assert_called_once_with("repo-full-name", None)
validate_config.assert_called_once_with("config-data")
self.assertEqual(
response,
(
{
'status': 'Config Validation Error',
'message': 'Bad config data'
},
500
)
)
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
@patch("github_approval_checker.utils.util.validate_config")
def test_post_pull_request_review_bad_sign(
self,
validate_config,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review with an incorrect signature
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.side_effect = SignatureError("Error validating signature")
response = endpoints.post_pull_request_review({})
handler = handler_class.return_value
handler.get_config.return_value = "config-data"
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
handler.get_config.assert_not_called()
validate_config.assert_not_called()
self.assertEqual(
response,
(
{
'status': 'Signature Validation Error',
'message': 'Error validating signature'
},
400
)
)
|
normal
|
{
"blob_id": "7626202d1e3ec7321addbb028be2275b882efda2",
"index": 6453,
"step-1": "<mask token>\n\n\nclass EndpointsUnitTests(unittest.TestCase):\n <mask token>\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review(self, validate_config, handler_class,\n conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = {'context1': ['whitelist1'],\n 'context2': ['whitelist2']}\n handler.get_statuses.return_value = [{'state': 'error', 'context':\n 'context2', 'target_url': 'fake://status_target_2',\n 'description': 'Status Check 2'}, {'state': 'pending',\n 'context': 'context3', 'target_url': 'fake://status_target_3',\n 'description': 'Status Check 3'}, {'state': 'failure',\n 'context': 'context1', 'target_url': 'fake://status_target_1',\n 'description': 'Status Check 1'}]\n handler.is_authorized.return_value = True\n validate_config.return_value = None\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'approved', 'commit_id': 'review-commit-id', 'user':\n {'login': 'review-user-login'}}}\n handler.post_status.side_effect = [201, 400]\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_called_once_with('repo-full-name',\n 'review-commit-id')\n self.assertEqual(handler.is_authorized.call_count, 2)\n handler.post_status.assert_has_calls([call('repo-full-name',\n 'review-commit-id', 'context2', 'fake://status_target_2',\n 'review-user-login', 'Status Check 2'), call('repo-full-name',\n 'review-commit-id', 'context1', 'fake://status_target_1',\n 'review-user-login', 'Status Check 1')])\n self.assertEqual(response, util.STATUS_OK)\n <mask token>\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n def test_post_pull_request_review_missing(self, handler_class, conn,\n verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a missing config file\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.side_effect = APIError('config-error',\n \"{'message': 'bad-config'}\")\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n self.assertEqual(response, \"{'message': 'bad-config'}\")\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_bad_config(self, validate_config,\n handler_class, conn, 
verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a bad config file\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = 'config-data'\n validate_config.side_effect = ConfigError('Config Validation Error',\n ({'status': 'Config Validation Error', 'message':\n 'Bad config data'}, 500))\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_called_once_with('repo-full-name', None)\n validate_config.assert_called_once_with('config-data')\n self.assertEqual(response, ({'status': 'Config Validation Error',\n 'message': 'Bad config data'}, 500))\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_bad_sign(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with an incorrect signature\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.side_effect = SignatureError(\n 'Error validating signature')\n response = endpoints.post_pull_request_review({})\n handler = handler_class.return_value\n handler.get_config.return_value = 'config-data'\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_not_called()\n validate_config.assert_not_called()\n self.assertEqual(response, ({'status': 'Signature Validation Error',\n 'message': 'Error validating signature'}, 400))\n",
"step-2": "<mask token>\n\n\nclass EndpointsUnitTests(unittest.TestCase):\n <mask token>\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review(self, validate_config, handler_class,\n conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = {'context1': ['whitelist1'],\n 'context2': ['whitelist2']}\n handler.get_statuses.return_value = [{'state': 'error', 'context':\n 'context2', 'target_url': 'fake://status_target_2',\n 'description': 'Status Check 2'}, {'state': 'pending',\n 'context': 'context3', 'target_url': 'fake://status_target_3',\n 'description': 'Status Check 3'}, {'state': 'failure',\n 'context': 'context1', 'target_url': 'fake://status_target_1',\n 'description': 'Status Check 1'}]\n handler.is_authorized.return_value = True\n validate_config.return_value = None\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'approved', 'commit_id': 'review-commit-id', 'user':\n {'login': 'review-user-login'}}}\n handler.post_status.side_effect = [201, 400]\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_called_once_with('repo-full-name',\n 'review-commit-id')\n self.assertEqual(handler.is_authorized.call_count, 2)\n handler.post_status.assert_has_calls([call('repo-full-name',\n 'review-commit-id', 'context2', 'fake://status_target_2',\n 'review-user-login', 'Status Check 2'), call('repo-full-name',\n 'review-commit-id', 'context1', 'fake://status_target_1',\n 'review-user-login', 'Status Check 1')])\n self.assertEqual(response, util.STATUS_OK)\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_unapproved(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a review where the status is not approved.\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = {'context1': ['whitelist1'],\n 'context2': ['whitelist2']}\n validate_config.return_value = None\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n self.assertEqual(response, ({'status': 'OK', 'message':\n 'Review state is not approved'}, 200))\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n def 
test_post_pull_request_review_missing(self, handler_class, conn,\n verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a missing config file\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.side_effect = APIError('config-error',\n \"{'message': 'bad-config'}\")\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n self.assertEqual(response, \"{'message': 'bad-config'}\")\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_bad_config(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a bad config file\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = 'config-data'\n validate_config.side_effect = ConfigError('Config Validation Error',\n ({'status': 'Config Validation Error', 'message':\n 'Bad config data'}, 500))\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_called_once_with('repo-full-name', None)\n validate_config.assert_called_once_with('config-data')\n self.assertEqual(response, ({'status': 'Config Validation Error',\n 'message': 'Bad config data'}, 500))\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_bad_sign(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with an incorrect signature\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.side_effect = SignatureError(\n 'Error validating signature')\n response = endpoints.post_pull_request_review({})\n handler = handler_class.return_value\n handler.get_config.return_value = 'config-data'\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_not_called()\n validate_config.assert_not_called()\n self.assertEqual(response, ({'status': 'Signature Validation Error',\n 'message': 'Error validating signature'}, 400))\n",
"step-3": "<mask token>\n\n\nclass EndpointsUnitTests(unittest.TestCase):\n \"\"\"\n Test endpoints.py\n \"\"\"\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review(self, validate_config, handler_class,\n conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = {'context1': ['whitelist1'],\n 'context2': ['whitelist2']}\n handler.get_statuses.return_value = [{'state': 'error', 'context':\n 'context2', 'target_url': 'fake://status_target_2',\n 'description': 'Status Check 2'}, {'state': 'pending',\n 'context': 'context3', 'target_url': 'fake://status_target_3',\n 'description': 'Status Check 3'}, {'state': 'failure',\n 'context': 'context1', 'target_url': 'fake://status_target_1',\n 'description': 'Status Check 1'}]\n handler.is_authorized.return_value = True\n validate_config.return_value = None\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'approved', 'commit_id': 'review-commit-id', 'user':\n {'login': 'review-user-login'}}}\n handler.post_status.side_effect = [201, 400]\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_called_once_with('repo-full-name',\n 'review-commit-id')\n self.assertEqual(handler.is_authorized.call_count, 2)\n handler.post_status.assert_has_calls([call('repo-full-name',\n 'review-commit-id', 'context2', 'fake://status_target_2',\n 'review-user-login', 'Status Check 2'), call('repo-full-name',\n 'review-commit-id', 'context1', 'fake://status_target_1',\n 'review-user-login', 'Status Check 1')])\n self.assertEqual(response, util.STATUS_OK)\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_unapproved(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a review where the status is not approved.\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = {'context1': ['whitelist1'],\n 'context2': ['whitelist2']}\n validate_config.return_value = None\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n self.assertEqual(response, ({'status': 'OK', 'message':\n 'Review state is not approved'}, 200))\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n 
@patch('github_approval_checker.api.endpoints.GithubHandler')\n def test_post_pull_request_review_missing(self, handler_class, conn,\n verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a missing config file\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.side_effect = APIError('config-error',\n \"{'message': 'bad-config'}\")\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n self.assertEqual(response, \"{'message': 'bad-config'}\")\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_bad_config(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a bad config file\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = 'config-data'\n validate_config.side_effect = ConfigError('Config Validation Error',\n ({'status': 'Config Validation Error', 'message':\n 'Bad config data'}, 500))\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_called_once_with('repo-full-name', None)\n validate_config.assert_called_once_with('config-data')\n self.assertEqual(response, ({'status': 'Config Validation Error',\n 'message': 'Bad config data'}, 500))\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_bad_sign(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with an incorrect signature\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.side_effect = SignatureError(\n 'Error validating signature')\n response = endpoints.post_pull_request_review({})\n handler = handler_class.return_value\n handler.get_config.return_value = 'config-data'\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_not_called()\n validate_config.assert_not_called()\n self.assertEqual(response, ({'status': 'Signature Validation Error',\n 'message': 'Error validating signature'}, 
400))\n",
"step-4": "<mask token>\nimport unittest\nimport os\nfrom mock import patch, call\nfrom github_approval_checker.utils import util\nfrom github_approval_checker.utils.github_handler import GithubHandler\nfrom github_approval_checker.utils.exceptions import ConfigError, APIError, SignatureError\nfrom github_approval_checker.api import endpoints\n\n\nclass EndpointsUnitTests(unittest.TestCase):\n \"\"\"\n Test endpoints.py\n \"\"\"\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review(self, validate_config, handler_class,\n conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = {'context1': ['whitelist1'],\n 'context2': ['whitelist2']}\n handler.get_statuses.return_value = [{'state': 'error', 'context':\n 'context2', 'target_url': 'fake://status_target_2',\n 'description': 'Status Check 2'}, {'state': 'pending',\n 'context': 'context3', 'target_url': 'fake://status_target_3',\n 'description': 'Status Check 3'}, {'state': 'failure',\n 'context': 'context1', 'target_url': 'fake://status_target_1',\n 'description': 'Status Check 1'}]\n handler.is_authorized.return_value = True\n validate_config.return_value = None\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'approved', 'commit_id': 'review-commit-id', 'user':\n {'login': 'review-user-login'}}}\n handler.post_status.side_effect = [201, 400]\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_called_once_with('repo-full-name',\n 'review-commit-id')\n self.assertEqual(handler.is_authorized.call_count, 2)\n handler.post_status.assert_has_calls([call('repo-full-name',\n 'review-commit-id', 'context2', 'fake://status_target_2',\n 'review-user-login', 'Status Check 2'), call('repo-full-name',\n 'review-commit-id', 'context1', 'fake://status_target_1',\n 'review-user-login', 'Status Check 1')])\n self.assertEqual(response, util.STATUS_OK)\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_unapproved(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a review where the status is not approved.\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = {'context1': ['whitelist1'],\n 'context2': ['whitelist2']}\n validate_config.return_value = None\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n 
handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n self.assertEqual(response, ({'status': 'OK', 'message':\n 'Review state is not approved'}, 200))\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n def test_post_pull_request_review_missing(self, handler_class, conn,\n verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a missing config file\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.side_effect = APIError('config-error',\n \"{'message': 'bad-config'}\")\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n self.assertEqual(response, \"{'message': 'bad-config'}\")\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_bad_config(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a bad config file\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = 'config-data'\n validate_config.side_effect = ConfigError('Config Validation Error',\n ({'status': 'Config Validation Error', 'message':\n 'Bad config data'}, 500))\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_called_once_with('repo-full-name', None)\n validate_config.assert_called_once_with('config-data')\n self.assertEqual(response, ({'status': 'Config Validation Error',\n 'message': 'Bad config data'}, 500))\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_bad_sign(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with an incorrect signature\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.side_effect = SignatureError(\n 'Error validating signature')\n response = endpoints.post_pull_request_review({})\n handler = handler_class.return_value\n handler.get_config.return_value = 'config-data'\n 
handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_not_called()\n validate_config.assert_not_called()\n self.assertEqual(response, ({'status': 'Signature Validation Error',\n 'message': 'Error validating signature'}, 400))\n",
"step-5": "\"\"\"\nUnit Tests for endpoints.py\n\"\"\"\n\nimport unittest\nimport os # pylint: disable=unused-import\nfrom mock import patch, call\nfrom github_approval_checker.utils import util # pylint: disable=unused-import\nfrom github_approval_checker.utils.github_handler import GithubHandler # pylint: disable=unused-import\nfrom github_approval_checker.utils.exceptions import ConfigError, APIError, SignatureError # noqa pylint: disable=unused-import\nfrom github_approval_checker.api import endpoints # pylint: disable=unused-import\n\n\nclass EndpointsUnitTests(unittest.TestCase):\n \"\"\"\n Test endpoints.py\n \"\"\"\n\n @patch(\"github_approval_checker.utils.util.verify_signature\")\n @patch(\"github_approval_checker.api.endpoints.connexion\")\n @patch(\"github_approval_checker.api.endpoints.GithubHandler\")\n @patch(\"github_approval_checker.utils.util.validate_config\")\n def test_post_pull_request_review(\n self,\n validate_config,\n handler_class,\n conn,\n verify_signature\n ):\n \"\"\"\n Test endpoints.post_pull_request_review\n \"\"\"\n\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n\n handler = handler_class.return_value\n handler.get_config.return_value = {\n \"context1\": [\n \"whitelist1\"\n ],\n \"context2\": [\n \"whitelist2\"\n ]\n }\n\n handler.get_statuses.return_value = [\n {\n \"state\": \"error\",\n \"context\": \"context2\",\n \"target_url\": \"fake://status_target_2\",\n \"description\": \"Status Check 2\"\n },\n {\n \"state\": \"pending\",\n \"context\": \"context3\",\n \"target_url\": \"fake://status_target_3\",\n \"description\": \"Status Check 3\"\n },\n {\n \"state\": \"failure\",\n \"context\": \"context1\",\n \"target_url\": \"fake://status_target_1\",\n \"description\": \"Status Check 1\"\n }\n ]\n\n handler.is_authorized.return_value = True\n\n validate_config.return_value = None\n\n data = {\n \"repository\": {\n \"name\": \"repo-name\",\n \"full_name\": \"repo-full-name\",\n \"owner\": {\n \"login\": \"repo-owner\"\n }\n },\n \"review\": {\n \"state\": \"approved\",\n \"commit_id\": \"review-commit-id\",\n \"user\": {\n \"login\": \"review-user-login\"\n }\n }\n }\n\n handler.post_status.side_effect = [\n 201,\n 400\n ]\n\n response = endpoints.post_pull_request_review(data)\n\n handler.get_statuses.assert_called_once_with(\"repo-full-name\", \"review-commit-id\")\n self.assertEqual(handler.is_authorized.call_count, 2)\n handler.post_status.assert_has_calls([\n call(\n \"repo-full-name\",\n \"review-commit-id\",\n \"context2\",\n \"fake://status_target_2\",\n \"review-user-login\",\n \"Status Check 2\"\n ),\n call(\n \"repo-full-name\",\n \"review-commit-id\",\n \"context1\",\n \"fake://status_target_1\",\n \"review-user-login\",\n \"Status Check 1\"\n )\n ])\n self.assertEqual(response, util.STATUS_OK)\n\n @patch(\"github_approval_checker.utils.util.verify_signature\")\n @patch(\"github_approval_checker.api.endpoints.connexion\")\n @patch(\"github_approval_checker.api.endpoints.GithubHandler\")\n @patch(\"github_approval_checker.utils.util.validate_config\")\n def test_post_pull_request_review_unapproved(\n self,\n validate_config,\n handler_class,\n conn,\n verify_signature\n ):\n \"\"\"\n Test endpoints.post_pull_request_review with a review where the status is not approved.\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n\n handler = handler_class.return_value\n 
handler.get_config.return_value = {\n \"context1\": [\n \"whitelist1\"\n ],\n \"context2\": [\n \"whitelist2\"\n ]\n }\n\n validate_config.return_value = None\n\n data = {\n \"repository\": {\n \"name\": \"repo-name\",\n \"full_name\": \"repo-full-name\",\n \"owner\": {\n \"login\": \"repo-owner\"\n }\n },\n \"review\": {\n \"state\": \"changes-requested\",\n \"commit_id\": \"review-commit-id\",\n \"user\": {\n \"login\": \"review-user-login\"\n }\n }\n }\n\n response = endpoints.post_pull_request_review(data)\n\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n self.assertEqual(response, ({'status': 'OK', 'message': 'Review state is not approved'}, 200))\n\n @patch(\"github_approval_checker.utils.util.verify_signature\")\n @patch(\"github_approval_checker.api.endpoints.connexion\")\n @patch(\"github_approval_checker.api.endpoints.GithubHandler\")\n def test_post_pull_request_review_missing(\n self,\n handler_class,\n conn,\n verify_signature\n ):\n \"\"\"\n Test endpoints.post_pull_request_review with a missing config file\n \"\"\"\n\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n\n handler = handler_class.return_value\n handler.get_config.side_effect = APIError(\"config-error\", \"{'message': 'bad-config'}\")\n\n data = {\n \"repository\": {\n \"name\": \"repo-name\",\n \"full_name\": \"repo-full-name\",\n \"owner\": {\n \"login\": \"repo-owner\"\n }\n },\n \"review\": {\n \"state\": \"changes-requested\",\n \"commit_id\": \"review-commit-id\",\n \"user\": {\n \"login\": \"review-user-login\"\n }\n }\n }\n\n response = endpoints.post_pull_request_review(data)\n\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n self.assertEqual(response, \"{'message': 'bad-config'}\")\n\n @patch(\"github_approval_checker.utils.util.verify_signature\")\n @patch(\"github_approval_checker.api.endpoints.connexion\")\n @patch(\"github_approval_checker.api.endpoints.GithubHandler\")\n @patch(\"github_approval_checker.utils.util.validate_config\")\n def test_post_pull_request_review_bad_config(\n self,\n validate_config,\n handler_class,\n conn,\n verify_signature\n ):\n \"\"\"\n Test endpoints.post_pull_request_review with a bad config file\n \"\"\"\n\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n\n handler = handler_class.return_value\n handler.get_config.return_value = \"config-data\"\n\n validate_config.side_effect = ConfigError(\n 'Config Validation Error',\n ({'status': 'Config Validation Error', 'message': 'Bad config data'}, 500)\n )\n\n data = {\n \"repository\": {\n \"name\": \"repo-name\",\n \"full_name\": \"repo-full-name\",\n \"owner\": {\n \"login\": \"repo-owner\"\n }\n },\n \"review\": {\n \"state\": \"changes-requested\",\n \"commit_id\": \"review-commit-id\",\n \"user\": {\n \"login\": \"review-user-login\"\n }\n }\n }\n\n response = endpoints.post_pull_request_review(data)\n\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_called_once_with(\"repo-full-name\", None)\n validate_config.assert_called_once_with(\"config-data\")\n self.assertEqual(\n response,\n (\n {\n 'status': 'Config Validation Error',\n 'message': 'Bad config data'\n },\n 500\n )\n 
)\n\n @patch(\"github_approval_checker.utils.util.verify_signature\")\n @patch(\"github_approval_checker.api.endpoints.connexion\")\n @patch(\"github_approval_checker.api.endpoints.GithubHandler\")\n @patch(\"github_approval_checker.utils.util.validate_config\")\n def test_post_pull_request_review_bad_sign(\n self,\n validate_config,\n handler_class,\n conn,\n verify_signature\n ):\n \"\"\"\n Test endpoints.post_pull_request_review with an incorrect signature\n \"\"\"\n\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.side_effect = SignatureError(\"Error validating signature\")\n\n response = endpoints.post_pull_request_review({})\n\n handler = handler_class.return_value\n handler.get_config.return_value = \"config-data\"\n\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_not_called()\n validate_config.assert_not_called()\n self.assertEqual(\n response,\n (\n {\n 'status': 'Signature Validation Error',\n 'message': 'Error validating signature'\n },\n 400\n )\n )\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from django.urls import path
from .authentication import GetToken, RegisterUserAPIView
from .resurses import *
urlpatterns = [
path('register/', RegisterUserAPIView.as_view()),
path('get/token/', GetToken.as_view()),
path('card/list/', ShowCardsAPIView.as_view()),
path('card/create/', CreateCardAPIView.as_view()),
path('card/<int:pk>/status/raise/', RaiseStatusAPIView.as_view()),
path('card/<int:pk>/status/omit/', OmitStatusAPIView.as_view()),
path('card/<int:pk>/delete/', DeleteCardAPIView.as_view()),
path('card/<int:pk>/update/', UpdateCardAPIView.as_view()),
path('card/get/', GetCardSListAPIView.as_view()),
]
|
normal
|
{
"blob_id": "aac334256c1e05ef33a54da19925911af6645a10",
"index": 9529,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('register/', RegisterUserAPIView.as_view()), path(\n 'get/token/', GetToken.as_view()), path('card/list/', ShowCardsAPIView.\n as_view()), path('card/create/', CreateCardAPIView.as_view()), path(\n 'card/<int:pk>/status/raise/', RaiseStatusAPIView.as_view()), path(\n 'card/<int:pk>/status/omit/', OmitStatusAPIView.as_view()), path(\n 'card/<int:pk>/delete/', DeleteCardAPIView.as_view()), path(\n 'card/<int:pk>/update/', UpdateCardAPIView.as_view()), path('card/get/',\n GetCardSListAPIView.as_view())]\n",
"step-3": "from django.urls import path\nfrom .authentication import GetToken, RegisterUserAPIView\nfrom .resurses import *\nurlpatterns = [path('register/', RegisterUserAPIView.as_view()), path(\n 'get/token/', GetToken.as_view()), path('card/list/', ShowCardsAPIView.\n as_view()), path('card/create/', CreateCardAPIView.as_view()), path(\n 'card/<int:pk>/status/raise/', RaiseStatusAPIView.as_view()), path(\n 'card/<int:pk>/status/omit/', OmitStatusAPIView.as_view()), path(\n 'card/<int:pk>/delete/', DeleteCardAPIView.as_view()), path(\n 'card/<int:pk>/update/', UpdateCardAPIView.as_view()), path('card/get/',\n GetCardSListAPIView.as_view())]\n",
"step-4": "from django.urls import path\n\nfrom .authentication import GetToken, RegisterUserAPIView\nfrom .resurses import *\n\nurlpatterns = [\n path('register/', RegisterUserAPIView.as_view()),\n path('get/token/', GetToken.as_view()),\n path('card/list/', ShowCardsAPIView.as_view()),\n path('card/create/', CreateCardAPIView.as_view()),\n path('card/<int:pk>/status/raise/', RaiseStatusAPIView.as_view()),\n path('card/<int:pk>/status/omit/', OmitStatusAPIView.as_view()),\n path('card/<int:pk>/delete/', DeleteCardAPIView.as_view()),\n path('card/<int:pk>/update/', UpdateCardAPIView.as_view()),\n path('card/get/', GetCardSListAPIView.as_view()),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.contrib import admin
from django.db import models
from tinymce.widgets import TinyMCE
from .models import UserInfo
# Register your models here.
class UserInfoAdmin(admin.ModelAdmin):
list_display=[
'user_name',
'user_profession',
'user_phone',
'user_email',
'user_address',
'facebook_link',
'instagram_link',
'telegram_link',
'whatsup_link',
'linkedin_link',
'github_link',
'stackoverflow_link',
'facebook_link',
]
search_fields=[
'user_name',
'user_profession',
'user_phone',
'user_email',
'user_address',
'facebook_link',
'instagram_link',
'telegram_link',
'whatsup_link',
'linkedin_link',
'github_link',
'stackoverflow_link',
'facebook_link',
]
list_display_links=[
'user_name',
# 'user_profession',
# 'user_phone',
# 'user_email',
# 'user_address',
'facebook_link',
'instagram_link',
'telegram_link',
'whatsup_link',
'linkedin_link',
'github_link',
'stackoverflow_link',
'facebook_link',
]
list_editable = [
# 'user_name',
'user_profession',
'user_phone',
'user_email',
'user_address',
# 'facebook_link',
# 'instagram_link',
# 'telegram_link',
# 'whatsup_link',
# 'linkedin_link',
# 'github_link',
# 'stackoverflow_link',
# 'facebook_link',
]
fieldsets=(
('Basic Info', {'fields' : [
'user_image',
'user_name',
'user_profession',
],
},
),
(
'Contact Info', {
'fields': [
'user_phone',
'user_email',
'user_address',
],
},
),
(
'Social Links', {
'fields': [
'facebook_link',
'instagram_link',
'telegram_link',
'whatsup_link',
'linkedin_link',
'github_link',
'stackoverflow_link',
],
},
),
(
'Core Info', {
'fields' :[
'user_info',
'user_experience',
'user_edu',
],
},
),
)
formfield_overrides = {
models.TextField: {'widget': TinyMCE}
}
admin.site.register(UserInfo, UserInfoAdmin)
|
normal
|
{
"blob_id": "15134d7e4036c102bc9d2ba4d321fadd0467100f",
"index": 6637,
"step-1": "<mask token>\n\n\nclass UserInfoAdmin(admin.ModelAdmin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass UserInfoAdmin(admin.ModelAdmin):\n list_display = ['user_name', 'user_profession', 'user_phone',\n 'user_email', 'user_address', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n search_fields = ['user_name', 'user_profession', 'user_phone',\n 'user_email', 'user_address', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n list_display_links = ['user_name', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n list_editable = ['user_profession', 'user_phone', 'user_email',\n 'user_address']\n fieldsets = ('Basic Info', {'fields': ['user_image', 'user_name',\n 'user_profession']}), ('Contact Info', {'fields': ['user_phone',\n 'user_email', 'user_address']}), ('Social Links', {'fields': [\n 'facebook_link', 'instagram_link', 'telegram_link', 'whatsup_link',\n 'linkedin_link', 'github_link', 'stackoverflow_link']}), ('Core Info',\n {'fields': ['user_info', 'user_experience', 'user_edu']})\n formfield_overrides = {models.TextField: {'widget': TinyMCE}}\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass UserInfoAdmin(admin.ModelAdmin):\n list_display = ['user_name', 'user_profession', 'user_phone',\n 'user_email', 'user_address', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n search_fields = ['user_name', 'user_profession', 'user_phone',\n 'user_email', 'user_address', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n list_display_links = ['user_name', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n list_editable = ['user_profession', 'user_phone', 'user_email',\n 'user_address']\n fieldsets = ('Basic Info', {'fields': ['user_image', 'user_name',\n 'user_profession']}), ('Contact Info', {'fields': ['user_phone',\n 'user_email', 'user_address']}), ('Social Links', {'fields': [\n 'facebook_link', 'instagram_link', 'telegram_link', 'whatsup_link',\n 'linkedin_link', 'github_link', 'stackoverflow_link']}), ('Core Info',\n {'fields': ['user_info', 'user_experience', 'user_edu']})\n formfield_overrides = {models.TextField: {'widget': TinyMCE}}\n\n\nadmin.site.register(UserInfo, UserInfoAdmin)\n",
"step-4": "from django.contrib import admin\nfrom django.db import models\nfrom tinymce.widgets import TinyMCE\nfrom .models import UserInfo\n\n\nclass UserInfoAdmin(admin.ModelAdmin):\n list_display = ['user_name', 'user_profession', 'user_phone',\n 'user_email', 'user_address', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n search_fields = ['user_name', 'user_profession', 'user_phone',\n 'user_email', 'user_address', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n list_display_links = ['user_name', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n list_editable = ['user_profession', 'user_phone', 'user_email',\n 'user_address']\n fieldsets = ('Basic Info', {'fields': ['user_image', 'user_name',\n 'user_profession']}), ('Contact Info', {'fields': ['user_phone',\n 'user_email', 'user_address']}), ('Social Links', {'fields': [\n 'facebook_link', 'instagram_link', 'telegram_link', 'whatsup_link',\n 'linkedin_link', 'github_link', 'stackoverflow_link']}), ('Core Info',\n {'fields': ['user_info', 'user_experience', 'user_edu']})\n formfield_overrides = {models.TextField: {'widget': TinyMCE}}\n\n\nadmin.site.register(UserInfo, UserInfoAdmin)\n",
"step-5": "from django.contrib import admin\nfrom django.db import models\nfrom tinymce.widgets import TinyMCE\n\nfrom .models import UserInfo\n\n# Register your models here.\nclass UserInfoAdmin(admin.ModelAdmin):\n list_display=[\n 'user_name', \n 'user_profession', \n 'user_phone', \n 'user_email', \n 'user_address', \n 'facebook_link', \n 'instagram_link', \n 'telegram_link', \n 'whatsup_link', \n 'linkedin_link', \n 'github_link', \n 'stackoverflow_link', \n 'facebook_link', \n ]\n search_fields=[\n 'user_name', \n 'user_profession', \n 'user_phone', \n 'user_email', \n 'user_address', \n 'facebook_link', \n 'instagram_link', \n 'telegram_link', \n 'whatsup_link', \n 'linkedin_link', \n 'github_link', \n 'stackoverflow_link', \n 'facebook_link', \n ]\n list_display_links=[\n 'user_name', \n # 'user_profession', \n # 'user_phone', \n # 'user_email', \n # 'user_address', \n 'facebook_link', \n 'instagram_link', \n 'telegram_link', \n 'whatsup_link', \n 'linkedin_link', \n 'github_link', \n 'stackoverflow_link', \n 'facebook_link', \n ]\n list_editable = [\n # 'user_name', \n 'user_profession', \n 'user_phone', \n 'user_email', \n 'user_address', \n # 'facebook_link', \n # 'instagram_link', \n # 'telegram_link', \n # 'whatsup_link', \n # 'linkedin_link', \n # 'github_link', \n # 'stackoverflow_link', \n # 'facebook_link', \n ]\n\n fieldsets=(\n ('Basic Info', {'fields' : [\n 'user_image', \n 'user_name', \n 'user_profession', \n ],\n },\n ),\n (\n 'Contact Info', {\n 'fields': [\n 'user_phone', \n 'user_email', \n 'user_address', \n ],\n },\n ),\n (\n 'Social Links', {\n 'fields': [\n 'facebook_link', \n 'instagram_link', \n 'telegram_link', \n 'whatsup_link', \n 'linkedin_link', \n 'github_link', \n 'stackoverflow_link', \n ],\n },\n ),\n (\n 'Core Info', {\n 'fields' :[\n 'user_info',\n 'user_experience',\n 'user_edu',\n ],\n },\n ),\n )\n formfield_overrides = {\n models.TextField: {'widget': TinyMCE}\n }\nadmin.site.register(UserInfo, UserInfoAdmin)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class RemotePayView(TemplateView):
template_name = 'remotepay/pay.djhtml'
<|reserved_special_token_0|>
def pay_callback(request, checkoutid):
t = SumUpOnline.objects.get(transaction_id=checkoutid)
if t.status == 0 or t.status == 3:
return HttpResponseRedirect('/pay/error/')
elif t.status == 4:
return HttpResponseRedirect('/pay/success/')
elif t.status == 1 or t.status == 2:
return render(request, 'remotepay/hold.djhtml', {'checkoutid':
checkoutid})
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RemotePayView(TemplateView):
template_name = 'remotepay/pay.djhtml'
<|reserved_special_token_0|>
def pay_callback(request, checkoutid):
t = SumUpOnline.objects.get(transaction_id=checkoutid)
if t.status == 0 or t.status == 3:
return HttpResponseRedirect('/pay/error/')
elif t.status == 4:
return HttpResponseRedirect('/pay/success/')
elif t.status == 1 or t.status == 2:
return render(request, 'remotepay/hold.djhtml', {'checkoutid':
checkoutid})
def pay_success(request):
return render(request, 'remotepay/success.djhtml')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RemotePayView(TemplateView):
template_name = 'remotepay/pay.djhtml'
def pay(request):
if request.method == 'POST':
form = RemotePayForm(request.POST)
if form.is_valid():
phone = form.cleaned_data['phone']
amount = form.cleaned_data['amount']
try:
user = User.objects.get(phone=phone, is_crew=False)
except User.DoesNotExist:
return render(request, 'remotepay/pay.djhtml', {'form':
form, 'error': True})
t = SumUpOnline.objects.create(user=user, amount=amount)
try:
txid = create_checkout(SumUpAPIKey.objects.all().last(), t.
id, t.amount, user.phone)
t.transaction_id = txid
t.status = 1
t.save()
return render(request, 'remotepay/process.djhtml', {'txid':
txid, 'phone': phone, 'amount': amount})
except:
return render(request, 'remotepay/pay.djhtml', {'form':
form, 'systemerror': True})
else:
form = RemotePayForm
return render(request, 'remotepay/pay.djhtml', {'form': form})
def pay_callback(request, checkoutid):
t = SumUpOnline.objects.get(transaction_id=checkoutid)
if t.status == 0 or t.status == 3:
return HttpResponseRedirect('/pay/error/')
elif t.status == 4:
return HttpResponseRedirect('/pay/success/')
elif t.status == 1 or t.status == 2:
return render(request, 'remotepay/hold.djhtml', {'checkoutid':
checkoutid})
def pay_success(request):
return render(request, 'remotepay/success.djhtml')
def pay_error(request):
return render(request, 'remotepay/error.djhtml')
def pay_hold(request):
return render(request, 'remotepay/hold.djhtml')
<|reserved_special_token_1|>
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views.generic import TemplateView
from pos.service.sumup import API_URL, create_checkout
from pos.models.sumup import SumUpAPIKey, SumUpOnline
from pos.forms import RemotePayForm
from pos.models.user import User
class RemotePayView(TemplateView):
template_name = 'remotepay/pay.djhtml'
def pay(request):
if request.method == 'POST':
form = RemotePayForm(request.POST)
if form.is_valid():
phone = form.cleaned_data['phone']
amount = form.cleaned_data['amount']
try:
user = User.objects.get(phone=phone, is_crew=False)
except User.DoesNotExist:
return render(request, 'remotepay/pay.djhtml', {'form':
form, 'error': True})
t = SumUpOnline.objects.create(user=user, amount=amount)
try:
txid = create_checkout(SumUpAPIKey.objects.all().last(), t.
id, t.amount, user.phone)
t.transaction_id = txid
t.status = 1
t.save()
return render(request, 'remotepay/process.djhtml', {'txid':
txid, 'phone': phone, 'amount': amount})
except:
return render(request, 'remotepay/pay.djhtml', {'form':
form, 'systemerror': True})
else:
form = RemotePayForm
return render(request, 'remotepay/pay.djhtml', {'form': form})
def pay_callback(request, checkoutid):
t = SumUpOnline.objects.get(transaction_id=checkoutid)
if t.status == 0 or t.status == 3:
return HttpResponseRedirect('/pay/error/')
elif t.status == 4:
return HttpResponseRedirect('/pay/success/')
elif t.status == 1 or t.status == 2:
return render(request, 'remotepay/hold.djhtml', {'checkoutid':
checkoutid})
def pay_success(request):
return render(request, 'remotepay/success.djhtml')
def pay_error(request):
return render(request, 'remotepay/error.djhtml')
def pay_hold(request):
return render(request, 'remotepay/hold.djhtml')
<|reserved_special_token_1|>
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views.generic import TemplateView
from pos.service.sumup import API_URL, create_checkout
from pos.models.sumup import SumUpAPIKey, SumUpOnline
from pos.forms import RemotePayForm
from pos.models.user import User
class RemotePayView(TemplateView):
template_name = 'remotepay/pay.djhtml'
def pay(request):
if request.method == 'POST':
form = RemotePayForm(request.POST)
if form.is_valid():
phone = form.cleaned_data['phone']
amount = form.cleaned_data['amount']
# Check if user exists
try:
user = User.objects.get(phone=phone, is_crew=False)
except User.DoesNotExist:
return render(request, 'remotepay/pay.djhtml', {'form': form, 'error': True})
# Assuming the user exists, we proceed
t = SumUpOnline.objects.create(user=user, amount=amount)
try:
txid = create_checkout(SumUpAPIKey.objects.all().last(), t.id, t.amount, user.phone)
t.transaction_id = txid
t.status = 1
t.save()
return render(request, 'remotepay/process.djhtml', {'txid': txid, 'phone': phone, 'amount': amount})
except:
return render(request, 'remotepay/pay.djhtml', {'form': form, 'systemerror': True})
else:
form = RemotePayForm
return render(request, 'remotepay/pay.djhtml', {'form': form})
def pay_callback(request, checkoutid):
# Get the status of the transaction for the user
t = SumUpOnline.objects.get(transaction_id=checkoutid)
if (t.status == 0 or t.status == 3):
return HttpResponseRedirect('/pay/error/')
elif (t.status == 4):
return HttpResponseRedirect('/pay/success/')
elif (t.status == 1) or (t.status == 2):
return render(request, 'remotepay/hold.djhtml', {'checkoutid': checkoutid})
def pay_success(request):
return render(request, 'remotepay/success.djhtml')
def pay_error(request):
return render(request, 'remotepay/error.djhtml')
def pay_hold(request):
return render(request, 'remotepay/hold.djhtml')
|
flexible
|
{
"blob_id": "731d2891bbc29879fd8900a11077c93550e4e88d",
"index": 4251,
"step-1": "<mask token>\n\n\nclass RemotePayView(TemplateView):\n template_name = 'remotepay/pay.djhtml'\n\n\n<mask token>\n\n\ndef pay_callback(request, checkoutid):\n t = SumUpOnline.objects.get(transaction_id=checkoutid)\n if t.status == 0 or t.status == 3:\n return HttpResponseRedirect('/pay/error/')\n elif t.status == 4:\n return HttpResponseRedirect('/pay/success/')\n elif t.status == 1 or t.status == 2:\n return render(request, 'remotepay/hold.djhtml', {'checkoutid':\n checkoutid})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RemotePayView(TemplateView):\n template_name = 'remotepay/pay.djhtml'\n\n\n<mask token>\n\n\ndef pay_callback(request, checkoutid):\n t = SumUpOnline.objects.get(transaction_id=checkoutid)\n if t.status == 0 or t.status == 3:\n return HttpResponseRedirect('/pay/error/')\n elif t.status == 4:\n return HttpResponseRedirect('/pay/success/')\n elif t.status == 1 or t.status == 2:\n return render(request, 'remotepay/hold.djhtml', {'checkoutid':\n checkoutid})\n\n\ndef pay_success(request):\n return render(request, 'remotepay/success.djhtml')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass RemotePayView(TemplateView):\n template_name = 'remotepay/pay.djhtml'\n\n\ndef pay(request):\n if request.method == 'POST':\n form = RemotePayForm(request.POST)\n if form.is_valid():\n phone = form.cleaned_data['phone']\n amount = form.cleaned_data['amount']\n try:\n user = User.objects.get(phone=phone, is_crew=False)\n except User.DoesNotExist:\n return render(request, 'remotepay/pay.djhtml', {'form':\n form, 'error': True})\n t = SumUpOnline.objects.create(user=user, amount=amount)\n try:\n txid = create_checkout(SumUpAPIKey.objects.all().last(), t.\n id, t.amount, user.phone)\n t.transaction_id = txid\n t.status = 1\n t.save()\n return render(request, 'remotepay/process.djhtml', {'txid':\n txid, 'phone': phone, 'amount': amount})\n except:\n return render(request, 'remotepay/pay.djhtml', {'form':\n form, 'systemerror': True})\n else:\n form = RemotePayForm\n return render(request, 'remotepay/pay.djhtml', {'form': form})\n\n\ndef pay_callback(request, checkoutid):\n t = SumUpOnline.objects.get(transaction_id=checkoutid)\n if t.status == 0 or t.status == 3:\n return HttpResponseRedirect('/pay/error/')\n elif t.status == 4:\n return HttpResponseRedirect('/pay/success/')\n elif t.status == 1 or t.status == 2:\n return render(request, 'remotepay/hold.djhtml', {'checkoutid':\n checkoutid})\n\n\ndef pay_success(request):\n return render(request, 'remotepay/success.djhtml')\n\n\ndef pay_error(request):\n return render(request, 'remotepay/error.djhtml')\n\n\ndef pay_hold(request):\n return render(request, 'remotepay/hold.djhtml')\n",
"step-4": "from django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom pos.service.sumup import API_URL, create_checkout\nfrom pos.models.sumup import SumUpAPIKey, SumUpOnline\nfrom pos.forms import RemotePayForm\nfrom pos.models.user import User\n\n\nclass RemotePayView(TemplateView):\n template_name = 'remotepay/pay.djhtml'\n\n\ndef pay(request):\n if request.method == 'POST':\n form = RemotePayForm(request.POST)\n if form.is_valid():\n phone = form.cleaned_data['phone']\n amount = form.cleaned_data['amount']\n try:\n user = User.objects.get(phone=phone, is_crew=False)\n except User.DoesNotExist:\n return render(request, 'remotepay/pay.djhtml', {'form':\n form, 'error': True})\n t = SumUpOnline.objects.create(user=user, amount=amount)\n try:\n txid = create_checkout(SumUpAPIKey.objects.all().last(), t.\n id, t.amount, user.phone)\n t.transaction_id = txid\n t.status = 1\n t.save()\n return render(request, 'remotepay/process.djhtml', {'txid':\n txid, 'phone': phone, 'amount': amount})\n except:\n return render(request, 'remotepay/pay.djhtml', {'form':\n form, 'systemerror': True})\n else:\n form = RemotePayForm\n return render(request, 'remotepay/pay.djhtml', {'form': form})\n\n\ndef pay_callback(request, checkoutid):\n t = SumUpOnline.objects.get(transaction_id=checkoutid)\n if t.status == 0 or t.status == 3:\n return HttpResponseRedirect('/pay/error/')\n elif t.status == 4:\n return HttpResponseRedirect('/pay/success/')\n elif t.status == 1 or t.status == 2:\n return render(request, 'remotepay/hold.djhtml', {'checkoutid':\n checkoutid})\n\n\ndef pay_success(request):\n return render(request, 'remotepay/success.djhtml')\n\n\ndef pay_error(request):\n return render(request, 'remotepay/error.djhtml')\n\n\ndef pay_hold(request):\n return render(request, 'remotepay/hold.djhtml')\n",
"step-5": "from django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom pos.service.sumup import API_URL, create_checkout\nfrom pos.models.sumup import SumUpAPIKey, SumUpOnline\n\nfrom pos.forms import RemotePayForm\nfrom pos.models.user import User\n\n\nclass RemotePayView(TemplateView):\n template_name = 'remotepay/pay.djhtml'\n\n\ndef pay(request):\n if request.method == 'POST':\n form = RemotePayForm(request.POST)\n\n if form.is_valid():\n phone = form.cleaned_data['phone']\n amount = form.cleaned_data['amount']\n # Check if user exists\n try:\n user = User.objects.get(phone=phone, is_crew=False)\n except User.DoesNotExist:\n return render(request, 'remotepay/pay.djhtml', {'form': form, 'error': True})\n\n # Assuming the user exists, we proceed\n t = SumUpOnline.objects.create(user=user, amount=amount)\n\n try:\n txid = create_checkout(SumUpAPIKey.objects.all().last(), t.id, t.amount, user.phone)\n t.transaction_id = txid\n t.status = 1\n t.save()\n return render(request, 'remotepay/process.djhtml', {'txid': txid, 'phone': phone, 'amount': amount})\n except:\n return render(request, 'remotepay/pay.djhtml', {'form': form, 'systemerror': True})\n\n else:\n form = RemotePayForm\n\n return render(request, 'remotepay/pay.djhtml', {'form': form})\n\ndef pay_callback(request, checkoutid):\n # Get the status of the transaction for the user\n t = SumUpOnline.objects.get(transaction_id=checkoutid)\n\n if (t.status == 0 or t.status == 3):\n return HttpResponseRedirect('/pay/error/')\n elif (t.status == 4):\n return HttpResponseRedirect('/pay/success/')\n elif (t.status == 1) or (t.status == 2):\n return render(request, 'remotepay/hold.djhtml', {'checkoutid': checkoutid})\n\n\ndef pay_success(request):\n return render(request, 'remotepay/success.djhtml')\n\n\ndef pay_error(request):\n return render(request, 'remotepay/error.djhtml')\n\n\ndef pay_hold(request):\n return render(request, 'remotepay/hold.djhtml')\n",
"step-ids": [
3,
4,
7,
8,
9
]
}
|
[
3,
4,
7,
8,
9
] |
from typing import List, Any, Callable, Iterable, TypeVar, Tuple
T = TypeVar('T')
def partition(pred: Callable[[T], bool], it: Iterable[T]) \
-> Tuple[List[T], List[T]]: ...
|
normal
|
{
"blob_id": "8e443d136a4e9fcdd18a106192f9c097928b8c99",
"index": 7340,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef partition(pred: Callable[[T], bool], it: Iterable[T]) ->Tuple[List[T],\n List[T]]:\n ...\n",
"step-3": "<mask token>\nT = TypeVar('T')\n\n\ndef partition(pred: Callable[[T], bool], it: Iterable[T]) ->Tuple[List[T],\n List[T]]:\n ...\n",
"step-4": "from typing import List, Any, Callable, Iterable, TypeVar, Tuple\nT = TypeVar('T')\n\n\ndef partition(pred: Callable[[T], bool], it: Iterable[T]) ->Tuple[List[T],\n List[T]]:\n ...\n",
"step-5": "from typing import List, Any, Callable, Iterable, TypeVar, Tuple\n\nT = TypeVar('T')\n\ndef partition(pred: Callable[[T], bool], it: Iterable[T]) \\\n -> Tuple[List[T], List[T]]: ...\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def simple_formatter(zipcode: str, address: str) ->str:
return f'{zipcode}は「{address}」です'
|
flexible
|
{
"blob_id": "b1dce573e6da81c688b338277af214838bbab9dd",
"index": 8649,
"step-1": "<mask token>\n",
"step-2": "def simple_formatter(zipcode: str, address: str) ->str:\n return f'{zipcode}は「{address}」です'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# MolecularMatch API (MM-DATA) Python Example Sheet
# Based on documentation at https://api.molecularmatch.com
# Author: Shane Neeley, MolecularMatch Inc., Jan. 30, 2018
import requests
import json
import numpy as np
import sys
resourceURLs = {
"trialSearch": "/v2/search/trials",
"drugSearch": "/v2/search/drugs",
"publicationSearch": "/v2/search/publications",
"mutationGet": "/v2/mutation/get",
"geneGet": "/v2/gene/get",
"mutationClassify": "/v2/mutation/classify",
"validateTerms": "/v2/validate/terms",
"assertionSearch": "/v2/search/assertions",
"assertionExport": "/v2/export/assertions"
}
mmService = "https://api.molecularmatch.com"
# CHANGE THIS TO YOUR KEY or use as parameter (e.g. $ python3 publicationsAPI.py key)
apiKey = '<your api key>'
if apiKey == '<your api key>' and len(sys.argv) > 1:
apiKey = sys.argv[1]
# TODO: geolocation searches
#####################search trials##################################
url = mmService + resourceURLs["trialSearch"]
filters = [{'facet':'CONDITION','term':'Lung cancer'}]
payload = {
'apiKey': apiKey,
'filters': filters
}
r = requests.post(url, json=payload)
print(json.dumps(r.json()))
##################################################################
#####################SCENARIOS####################################
##################################################################
#### Clinical trial reporting
# When looking up trials for an actual patient, it is important to include the filters of Enrolling and Interventional
url = mmService + resourceURLs["trialSearch"]
filters = [
{"facet":"CONDITION","term":"Colorectal cancer"},
{"facet":"MUTATION","term":"BRAF V600E"},
{"facet":"STATUS", "term":"Enrolling"},
{"facet":"TRIALTYPE", "term":"Interventional"},
{"facet":"COUNTRY", "term":"France"}
]
payload = {
'apiKey': apiKey,
'filters': filters
}
r = requests.post(url, json=payload)
# Question: how many trials for a patient with this mutation and disease are interventional and enrolling in France?
print(r.json()['total'])
# Answer: 4
# Question: what are these trials ClinicalTrials.gov IDs and titles and email addresses for contact?
for i in np.arange(0, len(r.json()['rows']) ):
print(r.json()['rows'][i]['id'])
print(r.json()['rows'][i]['briefTitle'])
print(r.json()['rows'][i]['overallContact'])
# Answer:
# NCT02291289 - A Multi-Center Study of Biomarker-Driven Therapy in Metastatic Colorectal Cancer - [email protected]
# NCT01677741 - A Study to Determine Safety, Tolerability and Pharmacokinetics of Oral Dabrafenib In Children and Adolescent Subjects - [email protected]
# NCT02788279 - A Study to Investigate Efficacy and Safety of Cobimetinib Plus Atezolizumab and Atezolizumab Monotherapy Versus Regorafenib in Participants With Metastatic Colorectal Adenocarcinoma - [email protected]
# NCT02751177 - Detection of KRAS, NRAS et BRAF Mutations in Plasma Circulating DNA From Patients With Metastatic Colorectal Cancer - [email protected]
# Question: what are all the mutations that are associated with trial NCT02291289?
filters = [
{"facet":"ID","term":"NCT02291289"}
]
payload = {
'apiKey': apiKey,
'filters': filters
}
r = requests.post(url, json=payload)
# Note: must have tags activated on api key for this to work. Not all api key users get tags.
for tag in r.json()['rows'][0]['tags']:
if tag['facet'] == "MUTATION":
print(tag)
# Answer:
# 3 mutations are for inclusion criteria
# {'facet': 'MUTATION', 'term': 'EGFR P546S', 'alias': 'EGFR P546S', 'priority': '0', 'filterType': 'include'}
# {'facet': 'MUTATION', 'term': 'BRAF V600E', 'alias': 'BRAF V600E', 'priority': '0', 'filterType': 'include'}
# {'facet': 'MUTATION', 'term': 'Microsatellite instability', 'alias': 'Microsatellite instability', 'priority': '0', 'filterType': 'include'}
# 2 mutations are for exclusion criteria (filterType = 'exclude')
# {'facet': 'MUTATION', 'term': 'EGFR S492R', 'alias': 'EGFR S492R', 'priority': 1, 'filterType': 'exclude'}
# {'facet': 'MUTATION', 'term': 'BRAF G469L', 'alias': 'BRAF G469L', 'priority': 1, 'filterType': 'exclude'}
# See more about the trial data model at: https://api.molecularmatch.com/#trialDataModel
#### Mutation details lookup
# So you want to know everything there is to know about BRAF V600E?
url = mmService + resourceURLs["mutationGet"]
payload = {
'apiKey': apiKey,
'name': 'BRAF V600E'
}
r = requests.get(url, params=payload)
# Question: what databases have reported this mutation?
print(r.json()['sources'])
# Answer: 'COSMIC', 'CIViC', 'DoCM', 'cBioPortal', 'ClinVar'
# Question: is there a known protein domain this mutation is in?
for i in r.json()['parents']:
if (i['type'] == 'domain'):
print(i)
# Answer: BRAF Pkinase_Tyr domain (protein tyrosine kinase domain)
# What is the clinical interpretation of BRAF V600E? Are there trials, drugs, publications about it?
url = mmService + resourceURLs["mutationClassify"]
payload = {
'apiKey': apiKey,
'variant': 'BRAF V600E',
'condition': 'Lung cancer'
}
r = requests.post(url, json=payload)
# Question: How does MolecularMatch classify this mutation in this condition?
print(r.json()['classifications'][0]['classification'])
# Answer: actionable
# Question: How many drugs approved and on label for the condition provided?
print(r.json()['classifications'][0]['drugsApprovedOnLabelCount'])
# Answer: 0
# Question: How many drugs approved but off-label for the condition provided?
print(r.json()['classifications'][0]['drugsApprovedOffLabelCount'])
# Answer: 6
# Question: What about experimental drugs?
print(r.json()['classifications'][0]['drugsExperimentalCount'])
# Answer: 4
# Question: How many clinical trials are open for this mutation and condition?
print(r.json()['classifications'][0]['trialCount'])
# Answer: 24
# Question: Is there a lot of research publications about this mutation in this condition?
print(r.json()['classifications'][0]['publicationCount'])
# Answer: 47
# Question: Ok, what are these 4 experimental drugs?
url = mmService + resourceURLs["drugSearch"]
# set geneExpand for Drug to False so drugs return only for V600E, not BRAF (see https://api.molecularmatch.com/#geneExpansion)
filters = [
{'facet':'CONDITION','term':'Lung cancer'},
{'facet':'MUTATION','term':'BRAF V600E', "geneExpand": {"Drug": False}}
]
payload = {
'apiKey': apiKey,
'filters': filters,
'mode': 'discovery'
}
r = requests.post(url, json=payload)
for drug in r.json()['rows']:
print(drug)
if drug['approved'] == False:
print(drug['name'])
# Answer:
# Lgx818
# Plx8394
# BGB-283
# Cep-32496
##################################################################
#####################BASIC QUERIES################################
##################################################################
####################search drugs##################################
url = mmService + resourceURLs["drugSearch"]
filters = [{'facet':'CONDITION','term':'Lung cancer'}]
payload = {
'apiKey': apiKey,
'filters': filters,
	'mode': 'discovery' # 'criteriaunmet' # multiple modes available for drugsearch. see api docs.
}
r = requests.post(url, json=payload)
print(json.dumps(r.json()))
#####################search trials##################################
url = mmService + resourceURLs["trialSearch"]
filters = [{'facet':'CONDITION','term':'Lung cancer'}]
payload = {
'apiKey': apiKey,
'filters': filters
}
r = requests.post(url, json=payload)
print(json.dumps(r.json()))
# Search trials by various ID types
filters = [
{"facet":"ID","term":"EUDRACT2017-003305-18"}
]
payload = {
'apiKey': apiKey,
'filters': filters
}
r = requests.post(url, json=payload)
print('r here')
print(r.json())
#####################search publications#############################
url = mmService + resourceURLs["publicationSearch"]
filters = [{'facet':'CONDITION','term':'Lung cancer'}]
payload = {
'apiKey': apiKey,
'filters': filters
}
r = requests.post(url, json=payload)
print(json.dumps(r.json()))
####################get mutation###################################
url = mmService + resourceURLs["mutationGet"]
payload = {
'apiKey': apiKey,
'name': 'BRAF V600E'
}
r = requests.get(url, params=payload)
print(json.dumps(r.json()))
######################get gene#################################
url = mmService + resourceURLs["geneGet"]
payload = {
'apiKey': apiKey,
'symbol': 'BRAF'
}
r = requests.get(url, params=payload)
print(json.dumps(r.json()))
######################classify mutation##############################
url = mmService + resourceURLs["mutationClassify"]
payload = {
'apiKey': apiKey,
'variant': 'EGFR T790M',
'condition': 'Lung cancer'
}
r = requests.post(url, json=payload)
print(json.dumps(r.json()))
|
normal
|
{
"blob_id": "b4593b3229b88db26c5e200431d00838c357c8e0",
"index": 2359,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif apiKey == '<your api key>' and sys.argv[1]:\n apiKey = sys.argv[1]\n<mask token>\nprint(json.dumps(r.json()))\n<mask token>\nprint(r.json()['total'])\nfor i in np.arange(0, len(r.json()['rows'])):\n print(r.json()['rows'][i]['id'])\n print(r.json()['rows'][i]['briefTitle'])\n print(r.json()['rows'][i]['overallContact'])\n<mask token>\nfor tag in r.json()['rows'][0]['tags']:\n if tag['facet'] == 'MUTATION':\n print(tag)\n<mask token>\nprint(r.json()['sources'])\nfor i in r.json()['parents']:\n if i['type'] == 'domain':\n print(i)\n<mask token>\nprint(r.json()['classifications'][0]['classification'])\nprint(r.json()['classifications'][0]['drugsApprovedOnLabelCount'])\nprint(r.json()['classifications'][0]['drugsApprovedOffLabelCount'])\nprint(r.json()['classifications'][0]['drugsExperimentalCount'])\nprint(r.json()['classifications'][0]['trialCount'])\nprint(r.json()['classifications'][0]['publicationCount'])\n<mask token>\nfor drug in r.json()['rows']:\n print(drug)\n if drug['approved'] == False:\n print(drug['name'])\n<mask token>\nprint(json.dumps(r.json()))\n<mask token>\nprint(json.dumps(r.json()))\n<mask token>\nprint('r here')\nprint(r.json())\n<mask token>\nprint(json.dumps(r.json()))\n<mask token>\nprint(json.dumps(r.json()))\n<mask token>\nprint(json.dumps(r.json()))\n<mask token>\nprint(json.dumps(r.json()))\n",
"step-3": "<mask token>\nresourceURLs = {'trialSearch': '/v2/search/trials', 'drugSearch':\n '/v2/search/drugs', 'publicationSearch': '/v2/search/publications',\n 'mutationGet': '/v2/mutation/get', 'geneGet': '/v2/gene/get',\n 'mutationClassify': '/v2/mutation/classify', 'validateTerms':\n '/v2/validate/terms', 'assertionSearch': '/v2/search/assertions',\n 'assertionExport': '/v2/export/assertions'}\nmmService = 'https://api.molecularmatch.com'\napiKey = '<your api key>'\nif apiKey == '<your api key>' and sys.argv[1]:\n apiKey = sys.argv[1]\nurl = mmService + resourceURLs['trialSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['trialSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Colorectal cancer'}, {'facet':\n 'MUTATION', 'term': 'BRAF V600E'}, {'facet': 'STATUS', 'term':\n 'Enrolling'}, {'facet': 'TRIALTYPE', 'term': 'Interventional'}, {\n 'facet': 'COUNTRY', 'term': 'France'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint(r.json()['total'])\nfor i in np.arange(0, len(r.json()['rows'])):\n print(r.json()['rows'][i]['id'])\n print(r.json()['rows'][i]['briefTitle'])\n print(r.json()['rows'][i]['overallContact'])\nfilters = [{'facet': 'ID', 'term': 'NCT02291289'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nfor tag in r.json()['rows'][0]['tags']:\n if tag['facet'] == 'MUTATION':\n print(tag)\nurl = mmService + resourceURLs['mutationGet']\npayload = {'apiKey': apiKey, 'name': 'BRAF V600E'}\nr = requests.get(url, params=payload)\nprint(r.json()['sources'])\nfor i in r.json()['parents']:\n if i['type'] == 'domain':\n print(i)\nurl = mmService + resourceURLs['mutationClassify']\npayload = {'apiKey': apiKey, 'variant': 'BRAF V600E', 'condition':\n 'Lung cancer'}\nr = requests.post(url, json=payload)\nprint(r.json()['classifications'][0]['classification'])\nprint(r.json()['classifications'][0]['drugsApprovedOnLabelCount'])\nprint(r.json()['classifications'][0]['drugsApprovedOffLabelCount'])\nprint(r.json()['classifications'][0]['drugsExperimentalCount'])\nprint(r.json()['classifications'][0]['trialCount'])\nprint(r.json()['classifications'][0]['publicationCount'])\nurl = mmService + resourceURLs['drugSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}, {'facet':\n 'MUTATION', 'term': 'BRAF V600E', 'geneExpand': {'Drug': False}}]\npayload = {'apiKey': apiKey, 'filters': filters, 'mode': 'discovery'}\nr = requests.post(url, json=payload)\nfor drug in r.json()['rows']:\n print(drug)\n if drug['approved'] == False:\n print(drug['name'])\nurl = mmService + resourceURLs['drugSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}]\npayload = {'apiKey': apiKey, 'filters': filters, 'mode': 'discovery'}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['trialSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\nfilters = [{'facet': 'ID', 'term': 'EUDRACT2017-003305-18'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint('r here')\nprint(r.json())\nurl = mmService + resourceURLs['publicationSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}]\npayload = {'apiKey': apiKey, 'filters': 
filters}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['mutationGet']\npayload = {'apiKey': apiKey, 'name': 'BRAF V600E'}\nr = requests.get(url, params=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['geneGet']\npayload = {'apiKey': apiKey, 'symbol': 'BRAF'}\nr = requests.get(url, params=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['mutationClassify']\npayload = {'apiKey': apiKey, 'variant': 'EGFR T790M', 'condition':\n 'Lung cancer'}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n",
"step-4": "import requests\nimport json\nimport numpy as np\nimport sys\nresourceURLs = {'trialSearch': '/v2/search/trials', 'drugSearch':\n '/v2/search/drugs', 'publicationSearch': '/v2/search/publications',\n 'mutationGet': '/v2/mutation/get', 'geneGet': '/v2/gene/get',\n 'mutationClassify': '/v2/mutation/classify', 'validateTerms':\n '/v2/validate/terms', 'assertionSearch': '/v2/search/assertions',\n 'assertionExport': '/v2/export/assertions'}\nmmService = 'https://api.molecularmatch.com'\napiKey = '<your api key>'\nif apiKey == '<your api key>' and sys.argv[1]:\n apiKey = sys.argv[1]\nurl = mmService + resourceURLs['trialSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['trialSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Colorectal cancer'}, {'facet':\n 'MUTATION', 'term': 'BRAF V600E'}, {'facet': 'STATUS', 'term':\n 'Enrolling'}, {'facet': 'TRIALTYPE', 'term': 'Interventional'}, {\n 'facet': 'COUNTRY', 'term': 'France'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint(r.json()['total'])\nfor i in np.arange(0, len(r.json()['rows'])):\n print(r.json()['rows'][i]['id'])\n print(r.json()['rows'][i]['briefTitle'])\n print(r.json()['rows'][i]['overallContact'])\nfilters = [{'facet': 'ID', 'term': 'NCT02291289'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nfor tag in r.json()['rows'][0]['tags']:\n if tag['facet'] == 'MUTATION':\n print(tag)\nurl = mmService + resourceURLs['mutationGet']\npayload = {'apiKey': apiKey, 'name': 'BRAF V600E'}\nr = requests.get(url, params=payload)\nprint(r.json()['sources'])\nfor i in r.json()['parents']:\n if i['type'] == 'domain':\n print(i)\nurl = mmService + resourceURLs['mutationClassify']\npayload = {'apiKey': apiKey, 'variant': 'BRAF V600E', 'condition':\n 'Lung cancer'}\nr = requests.post(url, json=payload)\nprint(r.json()['classifications'][0]['classification'])\nprint(r.json()['classifications'][0]['drugsApprovedOnLabelCount'])\nprint(r.json()['classifications'][0]['drugsApprovedOffLabelCount'])\nprint(r.json()['classifications'][0]['drugsExperimentalCount'])\nprint(r.json()['classifications'][0]['trialCount'])\nprint(r.json()['classifications'][0]['publicationCount'])\nurl = mmService + resourceURLs['drugSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}, {'facet':\n 'MUTATION', 'term': 'BRAF V600E', 'geneExpand': {'Drug': False}}]\npayload = {'apiKey': apiKey, 'filters': filters, 'mode': 'discovery'}\nr = requests.post(url, json=payload)\nfor drug in r.json()['rows']:\n print(drug)\n if drug['approved'] == False:\n print(drug['name'])\nurl = mmService + resourceURLs['drugSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}]\npayload = {'apiKey': apiKey, 'filters': filters, 'mode': 'discovery'}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['trialSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\nfilters = [{'facet': 'ID', 'term': 'EUDRACT2017-003305-18'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint('r here')\nprint(r.json())\nurl = mmService + resourceURLs['publicationSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung 
cancer'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['mutationGet']\npayload = {'apiKey': apiKey, 'name': 'BRAF V600E'}\nr = requests.get(url, params=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['geneGet']\npayload = {'apiKey': apiKey, 'symbol': 'BRAF'}\nr = requests.get(url, params=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['mutationClassify']\npayload = {'apiKey': apiKey, 'variant': 'EGFR T790M', 'condition':\n 'Lung cancer'}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n",
"step-5": "# MolecularMatch API (MM-DATA) Python Example Sheet\n# Based on documentation at https://api.molecularmatch.com\n# Author: Shane Neeley, MolecularMatch Inc., Jan. 30, 2018\n\nimport requests\nimport json\nimport numpy as np\nimport sys\n\nresourceURLs = {\n\t\"trialSearch\": \"/v2/search/trials\",\n\t\"drugSearch\": \"/v2/search/drugs\",\n\t\"publicationSearch\": \"/v2/search/publications\",\n\t\"mutationGet\": \"/v2/mutation/get\",\n\t\"geneGet\": \"/v2/gene/get\",\n\t\"mutationClassify\": \"/v2/mutation/classify\",\n\t\"validateTerms\": \"/v2/validate/terms\",\n\t\"assertionSearch\": \"/v2/search/assertions\",\n\t\"assertionExport\": \"/v2/export/assertions\"\n}\nmmService = \"https://api.molecularmatch.com\"\n\n# CHANGE THIS TO YOUR KEY or use as parameter (e.g. $ python3 publicationsAPI.py key)\napiKey = '<your api key>'\nif apiKey == '<your api key>' and sys.argv[1]:\n\tapiKey = sys.argv[1]\n\n#// TODO: geolocation searches\n\n#####################search trials##################################\n\nurl = mmService + resourceURLs[\"trialSearch\"]\nfilters = [{'facet':'CONDITION','term':'Lung cancer'}]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters\n}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n\n##################################################################\n#####################SCENARIOS####################################\n##################################################################\n\n#### Clinical trial reporting\n\n# When looking up trials for an actual patient, it is important to include the filters of Enrolling and Interventional\nurl = mmService + resourceURLs[\"trialSearch\"]\nfilters = [\n\t{\"facet\":\"CONDITION\",\"term\":\"Colorectal cancer\"},\n\t{\"facet\":\"MUTATION\",\"term\":\"BRAF V600E\"},\n\t{\"facet\":\"STATUS\", \"term\":\"Enrolling\"},\n\t{\"facet\":\"TRIALTYPE\", \"term\":\"Interventional\"},\n\t{\"facet\":\"COUNTRY\", \"term\":\"France\"}\n]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters\n}\nr = requests.post(url, json=payload)\n\n# Question: how many trials for a patient with this mutation and disease are interventional and enrolling in France?\nprint(r.json()['total'])\n# Answer: 4\n\n# Question: what are these trials ClinicalTrials.gov IDs and titles and email addresses for contact?\nfor i in np.arange(0, len(r.json()['rows']) ):\n\tprint(r.json()['rows'][i]['id'])\n\tprint(r.json()['rows'][i]['briefTitle'])\n\tprint(r.json()['rows'][i]['overallContact'])\n# Answer:\n# NCT02291289 - A Multi-Center Study of Biomarker-Driven Therapy in Metastatic Colorectal Cancer - [email protected]\n# NCT01677741 - A Study to Determine Safety, Tolerability and Pharmacokinetics of Oral Dabrafenib In Children and Adolescent Subjects - [email protected]\n# NCT02788279 - A Study to Investigate Efficacy and Safety of Cobimetinib Plus Atezolizumab and Atezolizumab Monotherapy Versus Regorafenib in Participants With Metastatic Colorectal Adenocarcinoma - [email protected]\n# NCT02751177 - Detection of KRAS, NRAS et BRAF Mutations in Plasma Circulating DNA From Patients With Metastatic Colorectal Cancer - [email protected]\n\n# Question: what are all the mutations that are associated with trial NCT02291289?\nfilters = [\n\t{\"facet\":\"ID\",\"term\":\"NCT02291289\"}\n]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters\n}\nr = requests.post(url, json=payload)\n# Note: must have tags activated on api key for this to work. 
Not all api key users get tags.\nfor tag in r.json()['rows'][0]['tags']:\n\tif tag['facet'] == \"MUTATION\":\n\t\tprint(tag)\n\n# Answer:\n# 3 mutations are for inclusion criteria\n# {'facet': 'MUTATION', 'term': 'EGFR P546S', 'alias': 'EGFR P546S', 'priority': '0', 'filterType': 'include'}\n# {'facet': 'MUTATION', 'term': 'BRAF V600E', 'alias': 'BRAF V600E', 'priority': '0', 'filterType': 'include'}\n# {'facet': 'MUTATION', 'term': 'Microsatellite instability', 'alias': 'Microsatellite instability', 'priority': '0', 'filterType': 'include'}\n# 2 mutations are for exclusion criteria (filterType = 'exclude')\n# {'facet': 'MUTATION', 'term': 'EGFR S492R', 'alias': 'EGFR S492R', 'priority': 1, 'filterType': 'exclude'}\n# {'facet': 'MUTATION', 'term': 'BRAF G469L', 'alias': 'BRAF G469L', 'priority': 1, 'filterType': 'exclude'}\n\n# See more about the trial data model at: https://api.molecularmatch.com/#trialDataModel\n\n#### Mutation details lookup\n\n# So you want to know everything there is to know about BRAF V600E?\n\nurl = mmService + resourceURLs[\"mutationGet\"]\npayload = {\n\t'apiKey': apiKey,\n\t'name': 'BRAF V600E'\n}\nr = requests.get(url, params=payload)\n\n# Question: what databases have reported this mutation?\nprint(r.json()['sources'])\n# Answer: 'COSMIC', 'CIViC', 'DoCM', 'cBioPortal', 'ClinVar'\n\n# Question: is there a known protein domain this mutation is in?\nfor i in r.json()['parents']:\n\tif (i['type'] == 'domain'):\n\t\tprint(i)\n# Answer: BRAF Pkinase_Tyr domain (protein tyrosine kinase domain)\n\n# What is the clinical interpretation of BRAF V600E? Are there trials, drugs, publications about it?\n\nurl = mmService + resourceURLs[\"mutationClassify\"]\npayload = {\n\t'apiKey': apiKey,\n\t'variant': 'BRAF V600E',\n\t'condition': 'Lung cancer'\n}\nr = requests.post(url, json=payload)\n\n# Question: How does MolecularMatch classify this mutation in this condition?\nprint(r.json()['classifications'][0]['classification'])\n# Answer: actionable\n\n# Question: How many drugs approved and on label for the condition provided?\nprint(r.json()['classifications'][0]['drugsApprovedOnLabelCount'])\n# Answer: 0\n\n# Question: How many drugs approved but off-label for the condition provided?\nprint(r.json()['classifications'][0]['drugsApprovedOffLabelCount'])\n# Answer: 6\n\n# Question: What about experimental drugs?\nprint(r.json()['classifications'][0]['drugsExperimentalCount'])\n# Answer: 4\n\n# Question: How many clinical trials are open for this mutation and condition?\nprint(r.json()['classifications'][0]['trialCount'])\n# Answer: 24\n\n# Question: Is there a lot of research publications about this mutation in this condition?\nprint(r.json()['classifications'][0]['publicationCount'])\n# Answer: 47\n\n# Question: Ok, what are these 4 experimental drugs?\nurl = mmService + resourceURLs[\"drugSearch\"]\n# set geneExpand for Drug to False so drugs return only for V600E, not BRAF (see https://api.molecularmatch.com/#geneExpansion)\nfilters = [\n\t{'facet':'CONDITION','term':'Lung cancer'},\n\t{'facet':'MUTATION','term':'BRAF V600E', \"geneExpand\": {\"Drug\": False}}\n]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters,\n\t'mode': 'discovery'\n}\nr = requests.post(url, json=payload)\nfor drug in r.json()['rows']:\n\tprint(drug)\n\tif drug['approved'] == False:\n\t\tprint(drug['name'])\n\n# Answer:\n# Lgx818\n# Plx8394\n# BGB-283\n# Cep-32496\n\n##################################################################\n#####################BASIC 
QUERIES################################\n##################################################################\n\n####################search drugs##################################\n\nurl = mmService + resourceURLs[\"drugSearch\"]\nfilters = [{'facet':'CONDITION','term':'Lung cancer'}]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters,\n\t'mode': 'discovery' # 'criteriaunmet' # multiple modes avaiable for drugsearch. see api docs.\n}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n\n#####################search trials##################################\n\nurl = mmService + resourceURLs[\"trialSearch\"]\nfilters = [{'facet':'CONDITION','term':'Lung cancer'}]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters\n}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n\n# Search trials by various ID types\nfilters = [\n\t{\"facet\":\"ID\",\"term\":\"EUDRACT2017-003305-18\"}\n]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters\n}\nr = requests.post(url, json=payload)\nprint('r here')\nprint(r.json())\n\n#####################search publications#############################\n\nurl = mmService + resourceURLs[\"publicationSearch\"]\nfilters = [{'facet':'CONDITION','term':'Lung cancer'}]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters\n}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n\n####################get mutation###################################\n\nurl = mmService + resourceURLs[\"mutationGet\"]\npayload = {\n\t'apiKey': apiKey,\n\t'name': 'BRAF V600E'\n}\nr = requests.get(url, params=payload)\nprint(json.dumps(r.json()))\n\n######################get gene#################################\n\nurl = mmService + resourceURLs[\"geneGet\"]\npayload = {\n\t'apiKey': apiKey,\n\t'symbol': 'BRAF'\n}\nr = requests.get(url, params=payload)\nprint(json.dumps(r.json()))\n\n######################classify mutation##############################\n\nurl = mmService + resourceURLs[\"mutationClassify\"]\npayload = {\n\t'apiKey': apiKey,\n\t'variant': 'EGFR T790M',\n\t'condition': 'Lung cancer'\n}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def in_bound(dim, s):
"""Get inbound pixel coordinate for out-of-bound
Args:
dim (int): Image height or width
s (int): Coordinate
Returns:
int: Inbound
"""
if s <= -1:
return 0
elif s >= dim:
return dim - 1
else:
return s
<|reserved_special_token_0|>
def set_pixel(image, c):
image['pixels'].append(c)
def apply_per_pixel(image, func):
"""Apply func on every pixel of image
Args:
image (dict) : Image to be applied func at
func (function): Function to be applied
Returns:
dict: Modified image
"""
result = {'height': image['height'], 'width': image['width'], 'pixels': []}
for x in range(image['height']):
for y in range(image['width']):
color = get_pixel(image, x, y)
newcolor = func(color)
set_pixel(result, newcolor)
return result
<|reserved_special_token_0|>
def round_and_clip_image(image):
"""
Given a dictionary, ensure that the values in the 'pixels' list are all
integers in the range [0, 255].
All values should be converted to integers using Python's `round` function.
Any locations with values higher than 255 in the input should have value
255 in the output; and any locations with values lower than 0 in the input
should have value 0 in the output.
"""
for idx, pixel in enumerate(image['pixels']):
if round(pixel) < 0:
image['pixels'][idx] = 0
elif round(pixel) > 255:
image['pixels'][idx] = 255
else:
image['pixels'][idx] = round(pixel)
return image
def get_blur_kernel(n):
""" Get kernel to blur an image
Args:
n (int): kernel size
Returns:
list: kernel
"""
return [1 / n ** 2] * n ** 2
def blurred(image, n, correct=True):
"""
Return a new image representing the result of applying a box blur (with
kernel size n) to the given input image.
This process should not mutate the input image; rather, it should create a
separate structure to represent the output.
"""
kernel = get_blur_kernel(n)
correlated = correlate(image, kernel)
if correct:
return round_and_clip_image(correlated)
else:
return correlated
<|reserved_special_token_0|>
def load_image(filename):
"""
Loads an image from the given file and returns a dictionary
representing that image. This also performs conversion to greyscale.
Invoked as, for example:
i = load_image('test_images/cat.png')
"""
with open(filename, 'rb') as img_handle:
img = Image.open(img_handle)
img_data = img.getdata()
if img.mode.startswith('RGB'):
pixels = [round(0.299 * p[0] + 0.587 * p[1] + 0.114 * p[2]) for
p in img_data]
elif img.mode == 'LA':
pixels = [p[0] for p in img_data]
elif img.mode == 'L':
pixels = list(img_data)
else:
raise ValueError('Unsupported image mode: %r' % img.mode)
w, h = img.size
return {'height': h, 'width': w, 'pixels': pixels}
def save_image(image, filename, mode='PNG'):
"""
Saves the given image to disk or to a file-like object. If filename is
given as a string, the file type will be inferred from the given name. If
filename is given as a file-like object, the file type will be determined
by the 'mode' parameter.
"""
out = Image.new(mode='L', size=(image['width'], image['height']))
out.putdata(image['pixels'])
if isinstance(filename, str):
out.save(filename)
else:
out.save(filename, mode)
out.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def in_bound(dim, s):
"""Get inbound pixel coordinate for out-of-bound
Args:
dim (int): Image height or width
s (int): Coordinate
Returns:
int: Inbound
"""
if s <= -1:
return 0
elif s >= dim:
return dim - 1
else:
return s
<|reserved_special_token_0|>
def set_pixel(image, c):
image['pixels'].append(c)
def apply_per_pixel(image, func):
"""Apply func on every pixel of image
Args:
image (dict) : Image to be applied func at
func (function): Function to be applied
Returns:
dict: Modified image
"""
result = {'height': image['height'], 'width': image['width'], 'pixels': []}
for x in range(image['height']):
for y in range(image['width']):
color = get_pixel(image, x, y)
newcolor = func(color)
set_pixel(result, newcolor)
return result
def inverted(image):
"""Invert given image
Args:
image (dict): Input image
Returns:
dict: Inverted image
"""
return apply_per_pixel(image, lambda c: 255 - c)
def correlate(image, kernel):
"""
Compute the result of correlating the given image with the given kernel.
The output of this function should have the same form as a 6.009 image (a
dictionary with 'height', 'width', and 'pixels' keys), but its pixel values
do not necessarily need to be in the range [0,255], nor do they need to be
integers (they should not be clipped or rounded at all).
This process should not mutate the input image; rather, it should create a
separate structure to represent the output.
kernel = [k1, k2, k3, ... kn]
"""
result = {'height': image['height'], 'width': image['width'], 'pixels': []}
kernel_size = int(len(kernel) ** (1 / 2))
num_layers = int((kernel_size - 1) / 2)
for x in range(image['height']):
for y in range(image['width']):
newpixel = 0.0
for h in range(kernel_size):
for w in range(kernel_size):
newpixel += kernel[h * kernel_size + w] * get_pixel(image,
x - num_layers + h, y - num_layers + w)
set_pixel(result, newpixel)
return result
def round_and_clip_image(image):
"""
Given a dictionary, ensure that the values in the 'pixels' list are all
integers in the range [0, 255].
All values should be converted to integers using Python's `round` function.
Any locations with values higher than 255 in the input should have value
255 in the output; and any locations with values lower than 0 in the input
should have value 0 in the output.
"""
for idx, pixel in enumerate(image['pixels']):
if round(pixel) < 0:
image['pixels'][idx] = 0
elif round(pixel) > 255:
image['pixels'][idx] = 255
else:
image['pixels'][idx] = round(pixel)
return image
def get_blur_kernel(n):
""" Get kernel to blur an image
Args:
n (int): kernel size
Returns:
list: kernel
"""
return [1 / n ** 2] * n ** 2
def blurred(image, n, correct=True):
"""
Return a new image representing the result of applying a box blur (with
kernel size n) to the given input image.
This process should not mutate the input image; rather, it should create a
separate structure to represent the output.
"""
kernel = get_blur_kernel(n)
correlated = correlate(image, kernel)
if correct:
return round_and_clip_image(correlated)
else:
return correlated
<|reserved_special_token_0|>
def load_image(filename):
"""
Loads an image from the given file and returns a dictionary
representing that image. This also performs conversion to greyscale.
Invoked as, for example:
i = load_image('test_images/cat.png')
"""
with open(filename, 'rb') as img_handle:
img = Image.open(img_handle)
img_data = img.getdata()
if img.mode.startswith('RGB'):
pixels = [round(0.299 * p[0] + 0.587 * p[1] + 0.114 * p[2]) for
p in img_data]
elif img.mode == 'LA':
pixels = [p[0] for p in img_data]
elif img.mode == 'L':
pixels = list(img_data)
else:
raise ValueError('Unsupported image mode: %r' % img.mode)
w, h = img.size
return {'height': h, 'width': w, 'pixels': pixels}
def save_image(image, filename, mode='PNG'):
"""
Saves the given image to disk or to a file-like object. If filename is
given as a string, the file type will be inferred from the given name. If
filename is given as a file-like object, the file type will be determined
by the 'mode' parameter.
"""
out = Image.new(mode='L', size=(image['width'], image['height']))
out.putdata(image['pixels'])
if isinstance(filename, str):
out.save(filename)
else:
out.save(filename, mode)
out.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def in_bound(dim, s):
"""Get inbound pixel coordinate for out-of-bound
Args:
dim (int): Image height or width
s (int): Coordinate
Returns:
int: Inbound
"""
if s <= -1:
return 0
elif s >= dim:
return dim - 1
else:
return s
def get_pixel(image, x, y):
"""Get pixel of image from coordinates
Args:
image (dict): Image to get pixel from
x (int): x coordinate
y (int): y coordinate
Returns:
int: Pixel value
"""
x = in_bound(image['height'], x)
y = in_bound(image['width'], y)
return image['pixels'][x * image['width'] + y]
def set_pixel(image, c):
image['pixels'].append(c)
def apply_per_pixel(image, func):
"""Apply func on every pixel of image
Args:
image (dict) : Image to be applied func at
func (function): Function to be applied
Returns:
dict: Modified image
"""
result = {'height': image['height'], 'width': image['width'], 'pixels': []}
for x in range(image['height']):
for y in range(image['width']):
color = get_pixel(image, x, y)
newcolor = func(color)
set_pixel(result, newcolor)
return result
def inverted(image):
"""Invert given image
Args:
image (dict): Input image
Returns:
dict: Inverted image
"""
return apply_per_pixel(image, lambda c: 255 - c)
def correlate(image, kernel):
"""
Compute the result of correlating the given image with the given kernel.
The output of this function should have the same form as a 6.009 image (a
dictionary with 'height', 'width', and 'pixels' keys), but its pixel values
do not necessarily need to be in the range [0,255], nor do they need to be
integers (they should not be clipped or rounded at all).
This process should not mutate the input image; rather, it should create a
separate structure to represent the output.
kernel = [k1, k2, k3, ... kn]
"""
result = {'height': image['height'], 'width': image['width'], 'pixels': []}
kernel_size = int(len(kernel) ** (1 / 2))
num_layers = int((kernel_size - 1) / 2)
for x in range(image['height']):
for y in range(image['width']):
newpixel = 0.0
for h in range(kernel_size):
for w in range(kernel_size):
newpixel += kernel[h * kernel_size + w] * get_pixel(image,
x - num_layers + h, y - num_layers + w)
set_pixel(result, newpixel)
return result
def round_and_clip_image(image):
"""
Given a dictionary, ensure that the values in the 'pixels' list are all
integers in the range [0, 255].
All values should be converted to integers using Python's `round` function.
Any locations with values higher than 255 in the input should have value
255 in the output; and any locations with values lower than 0 in the input
should have value 0 in the output.
"""
for idx, pixel in enumerate(image['pixels']):
if round(pixel) < 0:
image['pixels'][idx] = 0
elif round(pixel) > 255:
image['pixels'][idx] = 255
else:
image['pixels'][idx] = round(pixel)
return image
def get_blur_kernel(n):
""" Get kernel to blur an image
Args:
n (int): kernel size
Returns:
list: kernel
"""
return [1 / n ** 2] * n ** 2
def blurred(image, n, correct=True):
"""
Return a new image representing the result of applying a box blur (with
kernel size n) to the given input image.
This process should not mutate the input image; rather, it should create a
separate structure to represent the output.
"""
kernel = get_blur_kernel(n)
correlated = correlate(image, kernel)
if correct:
return round_and_clip_image(correlated)
else:
return correlated
def sharpened(image, n):
"""Sharpen the given image
Args:
image (dict): Given image
n (int): Kernel size
Returns:
dict: Sharpened image
"""
result = {'height': image['height'], 'width': image['width'], 'pixels': []}
result['pixels'] = [(2 * x - y) for x, y in zip(image['pixels'],
blurred(image, n, False)['pixels'])]
return round_and_clip_image(result)
def edges(i):
"""Performs Sobel Operation on given image
Args:
i (dict): Input image
Returns:
dict: Resulting Image
"""
Oxy = i.copy()
Kx = [-1, 0, 1, -2, 0, 2, -1, 0, 1]
Ky = [-1, -2, -1, 0, 0, 0, 1, 2, 1]
Ox = correlate(i, Kx)
Oy = correlate(i, Ky)
Oxy['pixels'] = [((x ** 2 + y ** 2) ** (1 / 2)) for x, y in zip(Ox[
'pixels'], Oy['pixels'])]
result = round_and_clip_image(Oxy)
return result
def load_image(filename):
"""
Loads an image from the given file and returns a dictionary
representing that image. This also performs conversion to greyscale.
Invoked as, for example:
i = load_image('test_images/cat.png')
"""
with open(filename, 'rb') as img_handle:
img = Image.open(img_handle)
img_data = img.getdata()
if img.mode.startswith('RGB'):
pixels = [round(0.299 * p[0] + 0.587 * p[1] + 0.114 * p[2]) for
p in img_data]
elif img.mode == 'LA':
pixels = [p[0] for p in img_data]
elif img.mode == 'L':
pixels = list(img_data)
else:
raise ValueError('Unsupported image mode: %r' % img.mode)
w, h = img.size
return {'height': h, 'width': w, 'pixels': pixels}
def save_image(image, filename, mode='PNG'):
"""
Saves the given image to disk or to a file-like object. If filename is
given as a string, the file type will be inferred from the given name. If
filename is given as a file-like object, the file type will be determined
by the 'mode' parameter.
"""
out = Image.new(mode='L', size=(image['width'], image['height']))
out.putdata(image['pixels'])
if isinstance(filename, str):
out.save(filename)
else:
out.save(filename, mode)
out.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import math
from PIL import Image as Image
def in_bound(dim, s):
"""Get inbound pixel coordinate for out-of-bound
Args:
dim (int): Image height or width
s (int): Coordinate
Returns:
int: Inbound
"""
if s <= -1:
return 0
elif s >= dim:
return dim - 1
else:
return s
def get_pixel(image, x, y):
"""Get pixel of image from coordinates
Args:
image (dict): Image to get pixel from
x (int): x coordinate
y (int): y coordinate
Returns:
int: Pixel value
"""
x = in_bound(image['height'], x)
y = in_bound(image['width'], y)
return image['pixels'][x * image['width'] + y]
def set_pixel(image, c):
image['pixels'].append(c)
def apply_per_pixel(image, func):
"""Apply func on every pixel of image
Args:
image (dict) : Image to be applied func at
func (function): Function to be applied
Returns:
dict: Modified image
"""
result = {'height': image['height'], 'width': image['width'], 'pixels': []}
for x in range(image['height']):
for y in range(image['width']):
color = get_pixel(image, x, y)
newcolor = func(color)
set_pixel(result, newcolor)
return result
def inverted(image):
"""Invert given image
Args:
image (dict): Input image
Returns:
dict: Inverted image
"""
return apply_per_pixel(image, lambda c: 255 - c)
def correlate(image, kernel):
"""
Compute the result of correlating the given image with the given kernel.
The output of this function should have the same form as a 6.009 image (a
dictionary with 'height', 'width', and 'pixels' keys), but its pixel values
do not necessarily need to be in the range [0,255], nor do they need to be
integers (they should not be clipped or rounded at all).
This process should not mutate the input image; rather, it should create a
separate structure to represent the output.
kernel = [k1, k2, k3, ... kn]
"""
result = {'height': image['height'], 'width': image['width'], 'pixels': []}
kernel_size = int(len(kernel) ** (1 / 2))
num_layers = int((kernel_size - 1) / 2)
for x in range(image['height']):
for y in range(image['width']):
newpixel = 0.0
for h in range(kernel_size):
for w in range(kernel_size):
newpixel += kernel[h * kernel_size + w] * get_pixel(image,
x - num_layers + h, y - num_layers + w)
set_pixel(result, newpixel)
return result
def round_and_clip_image(image):
"""
Given a dictionary, ensure that the values in the 'pixels' list are all
integers in the range [0, 255].
All values should be converted to integers using Python's `round` function.
Any locations with values higher than 255 in the input should have value
255 in the output; and any locations with values lower than 0 in the input
should have value 0 in the output.
"""
for idx, pixel in enumerate(image['pixels']):
if round(pixel) < 0:
image['pixels'][idx] = 0
elif round(pixel) > 255:
image['pixels'][idx] = 255
else:
image['pixels'][idx] = round(pixel)
return image
def get_blur_kernel(n):
""" Get kernel to blur an image
Args:
n (int): kernel size
Returns:
list: kernel
"""
return [1 / n ** 2] * n ** 2
def blurred(image, n, correct=True):
"""
Return a new image representing the result of applying a box blur (with
kernel size n) to the given input image.
This process should not mutate the input image; rather, it should create a
separate structure to represent the output.
"""
kernel = get_blur_kernel(n)
correlated = correlate(image, kernel)
if correct:
return round_and_clip_image(correlated)
else:
return correlated
def sharpened(image, n):
"""Sharpen the given image
Args:
image (dict): Given image
n (int): Kernel size
Returns:
dict: Sharpened image
"""
result = {'height': image['height'], 'width': image['width'], 'pixels': []}
result['pixels'] = [(2 * x - y) for x, y in zip(image['pixels'],
blurred(image, n, False)['pixels'])]
return round_and_clip_image(result)
def edges(i):
"""Performs Sobel Operation on given image
Args:
i (dict): Input image
Returns:
dict: Resulting Image
"""
Oxy = i.copy()
Kx = [-1, 0, 1, -2, 0, 2, -1, 0, 1]
Ky = [-1, -2, -1, 0, 0, 0, 1, 2, 1]
Ox = correlate(i, Kx)
Oy = correlate(i, Ky)
Oxy['pixels'] = [((x ** 2 + y ** 2) ** (1 / 2)) for x, y in zip(Ox[
'pixels'], Oy['pixels'])]
result = round_and_clip_image(Oxy)
return result
def load_image(filename):
"""
Loads an image from the given file and returns a dictionary
representing that image. This also performs conversion to greyscale.
Invoked as, for example:
i = load_image('test_images/cat.png')
"""
with open(filename, 'rb') as img_handle:
img = Image.open(img_handle)
img_data = img.getdata()
if img.mode.startswith('RGB'):
pixels = [round(0.299 * p[0] + 0.587 * p[1] + 0.114 * p[2]) for
p in img_data]
elif img.mode == 'LA':
pixels = [p[0] for p in img_data]
elif img.mode == 'L':
pixels = list(img_data)
else:
raise ValueError('Unsupported image mode: %r' % img.mode)
w, h = img.size
return {'height': h, 'width': w, 'pixels': pixels}
def save_image(image, filename, mode='PNG'):
"""
Saves the given image to disk or to a file-like object. If filename is
given as a string, the file type will be inferred from the given name. If
filename is given as a file-like object, the file type will be determined
by the 'mode' parameter.
"""
out = Image.new(mode='L', size=(image['width'], image['height']))
out.putdata(image['pixels'])
if isinstance(filename, str):
out.save(filename)
else:
out.save(filename, mode)
out.close()
if __name__ == '__main__':
pass
<|reserved_special_token_1|>
#!/usr/bin/env python3
import math
from PIL import Image as Image
# NO ADDITIONAL IMPORTS ALLOWED!
def in_bound(dim, s):
    """Clamp an out-of-bound coordinate into the valid range for an image dimension.
    Args:
        dim (int): Image height or width
        s (int): Coordinate
    Returns:
        int: Inbound coordinate
    """
if s <= -1:
return 0
elif s >= dim:
return dim - 1
else:
return s
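# Illustrative sketch (added note, not part of the original lab handout): in_bound
# clamps a coordinate so out-of-range lookups reuse the nearest edge pixel.
# The values below are made-up spot checks for a dimension of 5:
#   in_bound(5, -3) -> 0    (before the first pixel: clamp to 0)
#   in_bound(5, 7)  -> 4    (past the last pixel: clamp to dim - 1)
#   in_bound(5, 2)  -> 2    (already inside: unchanged)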
def get_pixel(image, x, y):
"""Get pixel of image from coordinates
Args:
image (dict): Image to get pixel from
x (int): x coordinate
y (int): y coordinate
Returns:
int: Pixel value
"""
x = in_bound(image["height"], x)
y = in_bound(image["width"], y)
    return image['pixels'][x * image["width"] + y]
def set_pixel(image, c):
image['pixels'].append(c)
def apply_per_pixel(image, func):
"""Apply func on every pixel of image
Args:
image (dict) : Image to be applied func at
func (function): Function to be applied
Returns:
dict: Modified image
"""
result = {
'height': image['height'],
'width': image['width'],
'pixels': [],
}
for x in range(image['height']):
for y in range(image['width']):
color = get_pixel(image, x, y)
newcolor = func(color)
set_pixel(result, newcolor)
return result
def inverted(image):
"""Invert given image
Args:
image (dict): Input image
Returns:
dict: Inverted image
"""
return apply_per_pixel(image, lambda c: 255-c)
# HELPER FUNCTIONS
def correlate(image, kernel):
"""
Compute the result of correlating the given image with the given kernel.
The output of this function should have the same form as a 6.009 image (a
dictionary with 'height', 'width', and 'pixels' keys), but its pixel values
do not necessarily need to be in the range [0,255], nor do they need to be
integers (they should not be clipped or rounded at all).
This process should not mutate the input image; rather, it should create a
separate structure to represent the output.
kernel = [k1, k2, k3, ... kn]
"""
result = {"height":image["height"],
"width":image["width"],
"pixels": []}
kernel_size = int(len(kernel) ** (1/2))
num_layers = int((kernel_size - 1)/2) # Layers of kernel excluding center
for x in range(image["height"]):
for y in range(image["width"]):
newpixel = 0.0
for h in range(kernel_size):
for w in range(kernel_size):
# O[x,y] = (K[h,w] * I[x - num_layers + h, y - num_layers + w])
newpixel += kernel[h * kernel_size + w] * get_pixel(image,x - num_layers + h, y - num_layers + w)
set_pixel(result, newpixel)
return result
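# Illustrative sketch (added example, not from the original handout): with a 3x3
# identity kernel (1 in the centre, 0 elsewhere) correlate() should reproduce the
# input pixels as floats. The 2x2 image below is a made-up toy value.
#   identity = [0, 0, 0,
#               0, 1, 0,
#               0, 0, 0]
#   tiny = {'height': 2, 'width': 2, 'pixels': [10, 20, 30, 40]}
#   correlate(tiny, identity)['pixels']  ->  [10.0, 20.0, 30.0, 40.0]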
def round_and_clip_image(image):
"""
Given a dictionary, ensure that the values in the 'pixels' list are all
integers in the range [0, 255].
All values should be converted to integers using Python's `round` function.
Any locations with values higher than 255 in the input should have value
255 in the output; and any locations with values lower than 0 in the input
should have value 0 in the output.
"""
for idx, pixel in enumerate(image["pixels"]):
if round(pixel) < 0 :
image["pixels"][idx] = 0
elif round(pixel) > 255 :
image["pixels"][idx] = 255
else:
image["pixels"][idx] = round(pixel)
return image
# FILTERS
# helpers
def get_blur_kernel(n):
""" Get kernel to blur an image
Args:
n (int): kernel size
Returns:
list: kernel
"""
return [1/n**2] * n**2
def blurred(image, n, correct=True):
"""
Return a new image representing the result of applying a box blur (with
kernel size n) to the given input image.
This process should not mutate the input image; rather, it should create a
separate structure to represent the output.
"""
# first, create a representation for the appropriate n-by-n kernel (you may
# wish to define another helper function for this)
kernel = get_blur_kernel(n)
# then compute the correlation of the input image with that kernel
correlated = correlate(image, kernel)
# and, finally, make sure that the output is a valid image (using the
# helper function from above) before returning it.
if correct:
return round_and_clip_image(correlated)
else:
return correlated
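# Illustrative sketch (added example): get_blur_kernel(3) builds the flat 3x3 box
# kernel [1/9] * 9, so blurred(image, 3) replaces each pixel with the average of
# itself and its 8 neighbours (edge pixels reuse the clamped border values
# returned by get_pixel).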
def sharpened(image, n):
"""Sharpen the given image
Args:
image (dict): Given image
n (int): Kernel size
Returns:
dict: Sharpened image
"""
result = {"height": image["height"],
"width":image["width"],
"pixels":[]}
result["pixels"] = [2*x - y for x,y in zip(image["pixels"], blurred(image, n ,False)["pixels"])]
return round_and_clip_image(result)
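# Illustrative sketch (added example): sharpened() is an unsharp mask, S = 2*I - B,
# computed per pixel before clipping. With a made-up pixel value I = 100 whose
# blurred value is B = 80, the sharpened value is 2*100 - 80 = 120.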
def edges(i):
"""Performs Sobel Operation on given image
Args:
i (dict): Input image
Returns:
dict: Resulting Image
"""
Oxy = i.copy()
Kx = [-1, 0, 1, -2, 0, 2, -1, 0, 1]
Ky = [-1, -2, -1, 0, 0, 0, 1, 2, 1]
Ox = correlate(i, Kx)
    Oy = correlate(i, Ky)
Oxy["pixels"] = [ (x**2 + y**2)**(1/2) for x, y in zip(Ox["pixels"], Oy["pixels"])]
result = round_and_clip_image(Oxy)
return result
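# Illustrative usage sketch (added, mirroring the commented example in __main__ below);
# the output file name is just a made-up placeholder:
#   img = load_image('test_images/cat.png')
#   edge_map = edges(img)
#   save_image(edge_map, 'test_images/cat_edges.png')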
# HELPER FUNCTIONS FOR LOADING AND SAVING IMAGES
def load_image(filename):
"""
Loads an image from the given file and returns a dictionary
representing that image. This also performs conversion to greyscale.
Invoked as, for example:
i = load_image('test_images/cat.png')
"""
with open(filename, 'rb') as img_handle:
img = Image.open(img_handle)
img_data = img.getdata()
if img.mode.startswith('RGB'):
pixels = [round(.299 * p[0] + .587 * p[1] + .114 * p[2])
for p in img_data]
elif img.mode == 'LA':
pixels = [p[0] for p in img_data]
elif img.mode == 'L':
pixels = list(img_data)
else:
raise ValueError('Unsupported image mode: %r' % img.mode)
w, h = img.size
return {'height': h, 'width': w, 'pixels': pixels}
def save_image(image, filename, mode='PNG'):
"""
Saves the given image to disk or to a file-like object. If filename is
given as a string, the file type will be inferred from the given name. If
filename is given as a file-like object, the file type will be determined
by the 'mode' parameter.
"""
out = Image.new(mode='L', size=(image['width'], image['height']))
out.putdata(image['pixels'])
if isinstance(filename, str):
out.save(filename)
else:
out.save(filename, mode)
out.close()
if __name__ == '__main__':
# code in this block will only be run when you explicitly run your script,
# and not when the tests are being run. this is a good place for
# generating images, etc.
# 3.3 - Run your inversion filter
# bluegill = load_image("test_images/bluegill.png")
# inverted_bluegill = inverted(bluegill)
# save_image(inverted_bluegill, "test_images/inverted_bluegill.png")
pass
|
flexible
|
{
"blob_id": "591b1a2e245ae0f3c9b2a81769bbf5988574ed07",
"index": 8253,
"step-1": "<mask token>\n\n\ndef in_bound(dim, s):\n \"\"\"Get inbound pixel coordinate for out-of-bound\n\n Args:\n dim (int): Image height or width\n s (int): Coordinate \n\n Returns:\n int: Inbound\n \"\"\"\n if s <= -1:\n return 0\n elif s >= dim:\n return dim - 1\n else:\n return s\n\n\n<mask token>\n\n\ndef set_pixel(image, c):\n image['pixels'].append(c)\n\n\ndef apply_per_pixel(image, func):\n \"\"\"Apply func on every pixel of image\n\n Args:\n image (dict) : Image to be applied func at\n func (function): Function to be applied\n\n Returns:\n dict: Modified image\n \"\"\"\n result = {'height': image['height'], 'width': image['width'], 'pixels': []}\n for x in range(image['height']):\n for y in range(image['width']):\n color = get_pixel(image, x, y)\n newcolor = func(color)\n set_pixel(result, newcolor)\n return result\n\n\n<mask token>\n\n\ndef round_and_clip_image(image):\n \"\"\"\n Given a dictionary, ensure that the values in the 'pixels' list are all\n integers in the range [0, 255].\n\n All values should be converted to integers using Python's `round` function.\n\n Any locations with values higher than 255 in the input should have value\n 255 in the output; and any locations with values lower than 0 in the input\n should have value 0 in the output.\n \"\"\"\n for idx, pixel in enumerate(image['pixels']):\n if round(pixel) < 0:\n image['pixels'][idx] = 0\n elif round(pixel) > 255:\n image['pixels'][idx] = 255\n else:\n image['pixels'][idx] = round(pixel)\n return image\n\n\ndef get_blur_kernel(n):\n \"\"\" Get kernel to blur an image\n\n Args:\n n (int): kernel size\n\n Returns:\n list: kernel\n \"\"\"\n return [1 / n ** 2] * n ** 2\n\n\ndef blurred(image, n, correct=True):\n \"\"\"\n Return a new image representing the result of applying a box blur (with\n kernel size n) to the given input image.\n\n This process should not mutate the input image; rather, it should create a\n separate structure to represent the output.\n \"\"\"\n kernel = get_blur_kernel(n)\n correlated = correlate(image, kernel)\n if correct:\n return round_and_clip_image(correlated)\n else:\n return correlated\n\n\n<mask token>\n\n\ndef load_image(filename):\n \"\"\"\n Loads an image from the given file and returns a dictionary\n representing that image. This also performs conversion to greyscale.\n\n Invoked as, for example:\n i = load_image('test_images/cat.png')\n \"\"\"\n with open(filename, 'rb') as img_handle:\n img = Image.open(img_handle)\n img_data = img.getdata()\n if img.mode.startswith('RGB'):\n pixels = [round(0.299 * p[0] + 0.587 * p[1] + 0.114 * p[2]) for\n p in img_data]\n elif img.mode == 'LA':\n pixels = [p[0] for p in img_data]\n elif img.mode == 'L':\n pixels = list(img_data)\n else:\n raise ValueError('Unsupported image mode: %r' % img.mode)\n w, h = img.size\n return {'height': h, 'width': w, 'pixels': pixels}\n\n\ndef save_image(image, filename, mode='PNG'):\n \"\"\"\n Saves the given image to disk or to a file-like object. If filename is\n given as a string, the file type will be inferred from the given name. If\n filename is given as a file-like object, the file type will be determined\n by the 'mode' parameter.\n \"\"\"\n out = Image.new(mode='L', size=(image['width'], image['height']))\n out.putdata(image['pixels'])\n if isinstance(filename, str):\n out.save(filename)\n else:\n out.save(filename, mode)\n out.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef in_bound(dim, s):\n \"\"\"Get inbound pixel coordinate for out-of-bound\n\n Args:\n dim (int): Image height or width\n s (int): Coordinate \n\n Returns:\n int: Inbound\n \"\"\"\n if s <= -1:\n return 0\n elif s >= dim:\n return dim - 1\n else:\n return s\n\n\n<mask token>\n\n\ndef set_pixel(image, c):\n image['pixels'].append(c)\n\n\ndef apply_per_pixel(image, func):\n \"\"\"Apply func on every pixel of image\n\n Args:\n image (dict) : Image to be applied func at\n func (function): Function to be applied\n\n Returns:\n dict: Modified image\n \"\"\"\n result = {'height': image['height'], 'width': image['width'], 'pixels': []}\n for x in range(image['height']):\n for y in range(image['width']):\n color = get_pixel(image, x, y)\n newcolor = func(color)\n set_pixel(result, newcolor)\n return result\n\n\ndef inverted(image):\n \"\"\"Invert given image\n\n Args:\n image (dict): Input image\n\n Returns:\n dict: Inverted image\n \"\"\"\n return apply_per_pixel(image, lambda c: 255 - c)\n\n\ndef correlate(image, kernel):\n \"\"\"\n Compute the result of correlating the given image with the given kernel.\n\n The output of this function should have the same form as a 6.009 image (a\n dictionary with 'height', 'width', and 'pixels' keys), but its pixel values\n do not necessarily need to be in the range [0,255], nor do they need to be\n integers (they should not be clipped or rounded at all).\n\n This process should not mutate the input image; rather, it should create a\n separate structure to represent the output.\n\n kernel = [k1, k2, k3, ... kn]\n \"\"\"\n result = {'height': image['height'], 'width': image['width'], 'pixels': []}\n kernel_size = int(len(kernel) ** (1 / 2))\n num_layers = int((kernel_size - 1) / 2)\n for x in range(image['height']):\n for y in range(image['width']):\n newpixel = 0.0\n for h in range(kernel_size):\n for w in range(kernel_size):\n newpixel += kernel[h * kernel_size + w] * get_pixel(image,\n x - num_layers + h, y - num_layers + w)\n set_pixel(result, newpixel)\n return result\n\n\ndef round_and_clip_image(image):\n \"\"\"\n Given a dictionary, ensure that the values in the 'pixels' list are all\n integers in the range [0, 255].\n\n All values should be converted to integers using Python's `round` function.\n\n Any locations with values higher than 255 in the input should have value\n 255 in the output; and any locations with values lower than 0 in the input\n should have value 0 in the output.\n \"\"\"\n for idx, pixel in enumerate(image['pixels']):\n if round(pixel) < 0:\n image['pixels'][idx] = 0\n elif round(pixel) > 255:\n image['pixels'][idx] = 255\n else:\n image['pixels'][idx] = round(pixel)\n return image\n\n\ndef get_blur_kernel(n):\n \"\"\" Get kernel to blur an image\n\n Args:\n n (int): kernel size\n\n Returns:\n list: kernel\n \"\"\"\n return [1 / n ** 2] * n ** 2\n\n\ndef blurred(image, n, correct=True):\n \"\"\"\n Return a new image representing the result of applying a box blur (with\n kernel size n) to the given input image.\n\n This process should not mutate the input image; rather, it should create a\n separate structure to represent the output.\n \"\"\"\n kernel = get_blur_kernel(n)\n correlated = correlate(image, kernel)\n if correct:\n return round_and_clip_image(correlated)\n else:\n return correlated\n\n\n<mask token>\n\n\ndef load_image(filename):\n \"\"\"\n Loads an image from the given file and returns a dictionary\n representing that image. 
This also performs conversion to greyscale.\n\n Invoked as, for example:\n i = load_image('test_images/cat.png')\n \"\"\"\n with open(filename, 'rb') as img_handle:\n img = Image.open(img_handle)\n img_data = img.getdata()\n if img.mode.startswith('RGB'):\n pixels = [round(0.299 * p[0] + 0.587 * p[1] + 0.114 * p[2]) for\n p in img_data]\n elif img.mode == 'LA':\n pixels = [p[0] for p in img_data]\n elif img.mode == 'L':\n pixels = list(img_data)\n else:\n raise ValueError('Unsupported image mode: %r' % img.mode)\n w, h = img.size\n return {'height': h, 'width': w, 'pixels': pixels}\n\n\ndef save_image(image, filename, mode='PNG'):\n \"\"\"\n Saves the given image to disk or to a file-like object. If filename is\n given as a string, the file type will be inferred from the given name. If\n filename is given as a file-like object, the file type will be determined\n by the 'mode' parameter.\n \"\"\"\n out = Image.new(mode='L', size=(image['width'], image['height']))\n out.putdata(image['pixels'])\n if isinstance(filename, str):\n out.save(filename)\n else:\n out.save(filename, mode)\n out.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef in_bound(dim, s):\n \"\"\"Get inbound pixel coordinate for out-of-bound\n\n Args:\n dim (int): Image height or width\n s (int): Coordinate \n\n Returns:\n int: Inbound\n \"\"\"\n if s <= -1:\n return 0\n elif s >= dim:\n return dim - 1\n else:\n return s\n\n\ndef get_pixel(image, x, y):\n \"\"\"Get pixel of image from coordinates\n\n Args:\n image (dict): Image to get pixel from\n x (int): x coordinate\n y (int): y coordinate\n\n Returns:\n int: Pixel value\n \"\"\"\n x = in_bound(image['height'], x)\n y = in_bound(image['width'], y)\n return image['pixels'][x * image['width'] + y]\n\n\ndef set_pixel(image, c):\n image['pixels'].append(c)\n\n\ndef apply_per_pixel(image, func):\n \"\"\"Apply func on every pixel of image\n\n Args:\n image (dict) : Image to be applied func at\n func (function): Function to be applied\n\n Returns:\n dict: Modified image\n \"\"\"\n result = {'height': image['height'], 'width': image['width'], 'pixels': []}\n for x in range(image['height']):\n for y in range(image['width']):\n color = get_pixel(image, x, y)\n newcolor = func(color)\n set_pixel(result, newcolor)\n return result\n\n\ndef inverted(image):\n \"\"\"Invert given image\n\n Args:\n image (dict): Input image\n\n Returns:\n dict: Inverted image\n \"\"\"\n return apply_per_pixel(image, lambda c: 255 - c)\n\n\ndef correlate(image, kernel):\n \"\"\"\n Compute the result of correlating the given image with the given kernel.\n\n The output of this function should have the same form as a 6.009 image (a\n dictionary with 'height', 'width', and 'pixels' keys), but its pixel values\n do not necessarily need to be in the range [0,255], nor do they need to be\n integers (they should not be clipped or rounded at all).\n\n This process should not mutate the input image; rather, it should create a\n separate structure to represent the output.\n\n kernel = [k1, k2, k3, ... 
kn]\n \"\"\"\n result = {'height': image['height'], 'width': image['width'], 'pixels': []}\n kernel_size = int(len(kernel) ** (1 / 2))\n num_layers = int((kernel_size - 1) / 2)\n for x in range(image['height']):\n for y in range(image['width']):\n newpixel = 0.0\n for h in range(kernel_size):\n for w in range(kernel_size):\n newpixel += kernel[h * kernel_size + w] * get_pixel(image,\n x - num_layers + h, y - num_layers + w)\n set_pixel(result, newpixel)\n return result\n\n\ndef round_and_clip_image(image):\n \"\"\"\n Given a dictionary, ensure that the values in the 'pixels' list are all\n integers in the range [0, 255].\n\n All values should be converted to integers using Python's `round` function.\n\n Any locations with values higher than 255 in the input should have value\n 255 in the output; and any locations with values lower than 0 in the input\n should have value 0 in the output.\n \"\"\"\n for idx, pixel in enumerate(image['pixels']):\n if round(pixel) < 0:\n image['pixels'][idx] = 0\n elif round(pixel) > 255:\n image['pixels'][idx] = 255\n else:\n image['pixels'][idx] = round(pixel)\n return image\n\n\ndef get_blur_kernel(n):\n \"\"\" Get kernel to blur an image\n\n Args:\n n (int): kernel size\n\n Returns:\n list: kernel\n \"\"\"\n return [1 / n ** 2] * n ** 2\n\n\ndef blurred(image, n, correct=True):\n \"\"\"\n Return a new image representing the result of applying a box blur (with\n kernel size n) to the given input image.\n\n This process should not mutate the input image; rather, it should create a\n separate structure to represent the output.\n \"\"\"\n kernel = get_blur_kernel(n)\n correlated = correlate(image, kernel)\n if correct:\n return round_and_clip_image(correlated)\n else:\n return correlated\n\n\ndef sharpened(image, n):\n \"\"\"Sharpen the given image\n\n Args:\n image (dict): Given image\n n (int): Kernel size\n\n Returns:\n dict: Sharpened image\n \"\"\"\n result = {'height': image['height'], 'width': image['width'], 'pixels': []}\n result['pixels'] = [(2 * x - y) for x, y in zip(image['pixels'],\n blurred(image, n, False)['pixels'])]\n return round_and_clip_image(result)\n\n\ndef edges(i):\n \"\"\"Performs Sobel Operation on given image\n\n Args:\n i (dict): Input image\n Returns:\n dict: Resulting Image\n \"\"\"\n Oxy = i.copy()\n Kx = [-1, 0, 1, -2, 0, 2, -1, 0, 1]\n Ky = [-1, -2, -1, 0, 0, 0, 1, 2, 1]\n Ox = correlate(i, Kx)\n Oy = correlate(i, Ky)\n Oxy['pixels'] = [((x ** 2 + y ** 2) ** (1 / 2)) for x, y in zip(Ox[\n 'pixels'], Oy['pixels'])]\n result = round_and_clip_image(Oxy)\n return result\n\n\ndef load_image(filename):\n \"\"\"\n Loads an image from the given file and returns a dictionary\n representing that image. This also performs conversion to greyscale.\n\n Invoked as, for example:\n i = load_image('test_images/cat.png')\n \"\"\"\n with open(filename, 'rb') as img_handle:\n img = Image.open(img_handle)\n img_data = img.getdata()\n if img.mode.startswith('RGB'):\n pixels = [round(0.299 * p[0] + 0.587 * p[1] + 0.114 * p[2]) for\n p in img_data]\n elif img.mode == 'LA':\n pixels = [p[0] for p in img_data]\n elif img.mode == 'L':\n pixels = list(img_data)\n else:\n raise ValueError('Unsupported image mode: %r' % img.mode)\n w, h = img.size\n return {'height': h, 'width': w, 'pixels': pixels}\n\n\ndef save_image(image, filename, mode='PNG'):\n \"\"\"\n Saves the given image to disk or to a file-like object. If filename is\n given as a string, the file type will be inferred from the given name. 
If\n filename is given as a file-like object, the file type will be determined\n by the 'mode' parameter.\n \"\"\"\n out = Image.new(mode='L', size=(image['width'], image['height']))\n out.putdata(image['pixels'])\n if isinstance(filename, str):\n out.save(filename)\n else:\n out.save(filename, mode)\n out.close()\n\n\n<mask token>\n",
"step-4": "import math\nfrom PIL import Image as Image\n\n\ndef in_bound(dim, s):\n \"\"\"Get inbound pixel coordinate for out-of-bound\n\n Args:\n dim (int): Image height or width\n s (int): Coordinate \n\n Returns:\n int: Inbound\n \"\"\"\n if s <= -1:\n return 0\n elif s >= dim:\n return dim - 1\n else:\n return s\n\n\ndef get_pixel(image, x, y):\n \"\"\"Get pixel of image from coordinates\n\n Args:\n image (dict): Image to get pixel from\n x (int): x coordinate\n y (int): y coordinate\n\n Returns:\n int: Pixel value\n \"\"\"\n x = in_bound(image['height'], x)\n y = in_bound(image['width'], y)\n return image['pixels'][x * image['width'] + y]\n\n\ndef set_pixel(image, c):\n image['pixels'].append(c)\n\n\ndef apply_per_pixel(image, func):\n \"\"\"Apply func on every pixel of image\n\n Args:\n image (dict) : Image to be applied func at\n func (function): Function to be applied\n\n Returns:\n dict: Modified image\n \"\"\"\n result = {'height': image['height'], 'width': image['width'], 'pixels': []}\n for x in range(image['height']):\n for y in range(image['width']):\n color = get_pixel(image, x, y)\n newcolor = func(color)\n set_pixel(result, newcolor)\n return result\n\n\ndef inverted(image):\n \"\"\"Invert given image\n\n Args:\n image (dict): Input image\n\n Returns:\n dict: Inverted image\n \"\"\"\n return apply_per_pixel(image, lambda c: 255 - c)\n\n\ndef correlate(image, kernel):\n \"\"\"\n Compute the result of correlating the given image with the given kernel.\n\n The output of this function should have the same form as a 6.009 image (a\n dictionary with 'height', 'width', and 'pixels' keys), but its pixel values\n do not necessarily need to be in the range [0,255], nor do they need to be\n integers (they should not be clipped or rounded at all).\n\n This process should not mutate the input image; rather, it should create a\n separate structure to represent the output.\n\n kernel = [k1, k2, k3, ... 
kn]\n \"\"\"\n result = {'height': image['height'], 'width': image['width'], 'pixels': []}\n kernel_size = int(len(kernel) ** (1 / 2))\n num_layers = int((kernel_size - 1) / 2)\n for x in range(image['height']):\n for y in range(image['width']):\n newpixel = 0.0\n for h in range(kernel_size):\n for w in range(kernel_size):\n newpixel += kernel[h * kernel_size + w] * get_pixel(image,\n x - num_layers + h, y - num_layers + w)\n set_pixel(result, newpixel)\n return result\n\n\ndef round_and_clip_image(image):\n \"\"\"\n Given a dictionary, ensure that the values in the 'pixels' list are all\n integers in the range [0, 255].\n\n All values should be converted to integers using Python's `round` function.\n\n Any locations with values higher than 255 in the input should have value\n 255 in the output; and any locations with values lower than 0 in the input\n should have value 0 in the output.\n \"\"\"\n for idx, pixel in enumerate(image['pixels']):\n if round(pixel) < 0:\n image['pixels'][idx] = 0\n elif round(pixel) > 255:\n image['pixels'][idx] = 255\n else:\n image['pixels'][idx] = round(pixel)\n return image\n\n\ndef get_blur_kernel(n):\n \"\"\" Get kernel to blur an image\n\n Args:\n n (int): kernel size\n\n Returns:\n list: kernel\n \"\"\"\n return [1 / n ** 2] * n ** 2\n\n\ndef blurred(image, n, correct=True):\n \"\"\"\n Return a new image representing the result of applying a box blur (with\n kernel size n) to the given input image.\n\n This process should not mutate the input image; rather, it should create a\n separate structure to represent the output.\n \"\"\"\n kernel = get_blur_kernel(n)\n correlated = correlate(image, kernel)\n if correct:\n return round_and_clip_image(correlated)\n else:\n return correlated\n\n\ndef sharpened(image, n):\n \"\"\"Sharpen the given image\n\n Args:\n image (dict): Given image\n n (int): Kernel size\n\n Returns:\n dict: Sharpened image\n \"\"\"\n result = {'height': image['height'], 'width': image['width'], 'pixels': []}\n result['pixels'] = [(2 * x - y) for x, y in zip(image['pixels'],\n blurred(image, n, False)['pixels'])]\n return round_and_clip_image(result)\n\n\ndef edges(i):\n \"\"\"Performs Sobel Operation on given image\n\n Args:\n i (dict): Input image\n Returns:\n dict: Resulting Image\n \"\"\"\n Oxy = i.copy()\n Kx = [-1, 0, 1, -2, 0, 2, -1, 0, 1]\n Ky = [-1, -2, -1, 0, 0, 0, 1, 2, 1]\n Ox = correlate(i, Kx)\n Oy = correlate(i, Ky)\n Oxy['pixels'] = [((x ** 2 + y ** 2) ** (1 / 2)) for x, y in zip(Ox[\n 'pixels'], Oy['pixels'])]\n result = round_and_clip_image(Oxy)\n return result\n\n\ndef load_image(filename):\n \"\"\"\n Loads an image from the given file and returns a dictionary\n representing that image. This also performs conversion to greyscale.\n\n Invoked as, for example:\n i = load_image('test_images/cat.png')\n \"\"\"\n with open(filename, 'rb') as img_handle:\n img = Image.open(img_handle)\n img_data = img.getdata()\n if img.mode.startswith('RGB'):\n pixels = [round(0.299 * p[0] + 0.587 * p[1] + 0.114 * p[2]) for\n p in img_data]\n elif img.mode == 'LA':\n pixels = [p[0] for p in img_data]\n elif img.mode == 'L':\n pixels = list(img_data)\n else:\n raise ValueError('Unsupported image mode: %r' % img.mode)\n w, h = img.size\n return {'height': h, 'width': w, 'pixels': pixels}\n\n\ndef save_image(image, filename, mode='PNG'):\n \"\"\"\n Saves the given image to disk or to a file-like object. If filename is\n given as a string, the file type will be inferred from the given name. 
If\n filename is given as a file-like object, the file type will be determined\n by the 'mode' parameter.\n \"\"\"\n out = Image.new(mode='L', size=(image['width'], image['height']))\n out.putdata(image['pixels'])\n if isinstance(filename, str):\n out.save(filename)\n else:\n out.save(filename, mode)\n out.close()\n\n\nif __name__ == '__main__':\n pass\n",
"step-5": "#!/usr/bin/env python3\n\nimport math\n\nfrom PIL import Image as Image\n\n# NO ADDITIONAL IMPORTS ALLOWED!\n\ndef in_bound(dim , s):\n \"\"\"Get inbound pixel coordinate for out-of-bound\n\n Args:\n dim (int): Image height or width\n s (int): Coordinate \n\n Returns:\n int: Inbound\n \"\"\"\n if s <= -1:\n return 0\n elif s >= dim:\n return dim - 1\n else:\n return s\n\ndef get_pixel(image, x, y):\n \"\"\"Get pixel of image from coordinates\n\n Args:\n image (dict): Image to get pixel from\n x (int): x coordinate\n y (int): y coordinate\n\n Returns:\n int: Pixel value\n \"\"\"\n x = in_bound(image[\"height\"], x)\n y = in_bound(image[\"width\"], y)\n \n return image['pixels'][ x * image[\"width\"] + y]\n\n\ndef set_pixel(image, c):\n image['pixels'].append(c)\n\n\ndef apply_per_pixel(image, func):\n \"\"\"Apply func on every pixel of image\n\n Args:\n image (dict) : Image to be applied func at\n func (function): Function to be applied\n\n Returns:\n dict: Modified image\n \"\"\"\n result = {\n 'height': image['height'],\n 'width': image['width'],\n 'pixels': [],\n }\n for x in range(image['height']):\n for y in range(image['width']):\n color = get_pixel(image, x, y)\n newcolor = func(color)\n set_pixel(result, newcolor)\n return result\n\n\ndef inverted(image):\n \"\"\"Invert given image\n\n Args:\n image (dict): Input image\n\n Returns:\n dict: Inverted image\n \"\"\"\n return apply_per_pixel(image, lambda c: 255-c)\n\n\n# HELPER FUNCTIONS\n\ndef correlate(image, kernel):\n \"\"\"\n Compute the result of correlating the given image with the given kernel.\n\n The output of this function should have the same form as a 6.009 image (a\n dictionary with 'height', 'width', and 'pixels' keys), but its pixel values\n do not necessarily need to be in the range [0,255], nor do they need to be\n integers (they should not be clipped or rounded at all).\n\n This process should not mutate the input image; rather, it should create a\n separate structure to represent the output.\n\n kernel = [k1, k2, k3, ... 
kn]\n \"\"\"\n result = {\"height\":image[\"height\"],\n \"width\":image[\"width\"],\n \"pixels\": []}\n\n kernel_size = int(len(kernel) ** (1/2))\n num_layers = int((kernel_size - 1)/2) # Layers of kernel excluding center\n for x in range(image[\"height\"]):\n for y in range(image[\"width\"]):\n newpixel = 0.0\n for h in range(kernel_size):\n for w in range(kernel_size):\n # O[x,y] = (K[h,w] * I[x - num_layers + h, y - num_layers + w])\n newpixel += kernel[h * kernel_size + w] * get_pixel(image,x - num_layers + h, y - num_layers + w)\n set_pixel(result, newpixel)\n return result\n \n \n \n\ndef round_and_clip_image(image):\n \"\"\"\n Given a dictionary, ensure that the values in the 'pixels' list are all\n integers in the range [0, 255].\n\n All values should be converted to integers using Python's `round` function.\n\n Any locations with values higher than 255 in the input should have value\n 255 in the output; and any locations with values lower than 0 in the input\n should have value 0 in the output.\n \"\"\"\n \n for idx, pixel in enumerate(image[\"pixels\"]):\n if round(pixel) < 0 :\n image[\"pixels\"][idx] = 0\n elif round(pixel) > 255 :\n image[\"pixels\"][idx] = 255\n else:\n image[\"pixels\"][idx] = round(pixel)\n return image\n\n\n\n# FILTERS\n\n# helpers\ndef get_blur_kernel(n):\n \"\"\" Get kernel to blur an image\n\n Args:\n n (int): kernel size\n\n Returns:\n list: kernel\n \"\"\"\n return [1/n**2] * n**2\n\ndef blurred(image, n ,correct = True):\n \"\"\"\n Return a new image representing the result of applying a box blur (with\n kernel size n) to the given input image.\n\n This process should not mutate the input image; rather, it should create a\n separate structure to represent the output.\n \"\"\"\n # first, create a representation for the appropriate n-by-n kernel (you may\n # wish to define another helper function for this)\n kernel = get_blur_kernel(n)\n # then compute the correlation of the input image with that kernel\n correlated = correlate(image, kernel)\n\n # and, finally, make sure that the output is a valid image (using the\n # helper function from above) before returning it.\n if correct:\n return round_and_clip_image(correlated)\n else:\n return correlated\n\ndef sharpened(image, n):\n \"\"\"Sharpen the given image\n\n Args:\n image (dict): Given image\n n (int): Kernel size\n\n Returns:\n dict: Sharpened image\n \"\"\"\n result = {\"height\": image[\"height\"],\n \"width\":image[\"width\"],\n \"pixels\":[]}\n\n result[\"pixels\"] = [2*x - y for x,y in zip(image[\"pixels\"], blurred(image, n ,False)[\"pixels\"])]\n\n return round_and_clip_image(result)\n\ndef edges(i):\n \"\"\"Performs Sobel Operation on given image\n\n Args:\n i (dict): Input image\n Returns:\n dict: Resulting Image\n \"\"\"\n Oxy = i.copy()\n Kx = [-1, 0, 1, -2, 0, 2, -1, 0, 1]\n Ky = [-1, -2, -1, 0, 0, 0, 1, 2, 1]\n\n Ox = correlate(i, Kx)\n Oy = correlate(i,Ky)\n\n Oxy[\"pixels\"] = [ (x**2 + y**2)**(1/2) for x, y in zip(Ox[\"pixels\"], Oy[\"pixels\"])]\n\n result = round_and_clip_image(Oxy)\n return result\n\n\n# HELPER FUNCTIONS FOR LOADING AND SAVING IMAGES\n\ndef load_image(filename):\n \"\"\"\n Loads an image from the given file and returns a dictionary\n representing that image. 
This also performs conversion to greyscale.\n\n Invoked as, for example:\n i = load_image('test_images/cat.png')\n \"\"\"\n with open(filename, 'rb') as img_handle:\n img = Image.open(img_handle)\n img_data = img.getdata()\n if img.mode.startswith('RGB'):\n pixels = [round(.299 * p[0] + .587 * p[1] + .114 * p[2])\n for p in img_data]\n elif img.mode == 'LA':\n pixels = [p[0] for p in img_data]\n elif img.mode == 'L':\n pixels = list(img_data)\n else:\n raise ValueError('Unsupported image mode: %r' % img.mode)\n w, h = img.size\n return {'height': h, 'width': w, 'pixels': pixels}\n\n\ndef save_image(image, filename, mode='PNG'):\n \"\"\"\n Saves the given image to disk or to a file-like object. If filename is\n given as a string, the file type will be inferred from the given name. If\n filename is given as a file-like object, the file type will be determined\n by the 'mode' parameter.\n \"\"\"\n out = Image.new(mode='L', size=(image['width'], image['height']))\n out.putdata(image['pixels'])\n if isinstance(filename, str):\n out.save(filename)\n else:\n out.save(filename, mode)\n out.close()\n\n\nif __name__ == '__main__':\n # code in this block will only be run when you explicitly run your script,\n # and not when the tests are being run. this is a good place for\n # generating images, etc.\n \n # 3.3 - Run your inversion filter\n # bluegill = load_image(\"test_images/bluegill.png\")\n # inverted_bluegill = inverted(bluegill)\n # save_image(inverted_bluegill, \"test_images/inverted_bluegill.png\")\n pass\n\n\n \n",
"step-ids": [
8,
10,
13,
15,
16
]
}
|
[
8,
10,
13,
15,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('', view.enterMarks), path('MarkSheet', view.getMarks,
name='MarkSheet')]
<|reserved_special_token_1|>
from django.contrib import admin
from django.urls import path
from . import view
urlpatterns = [path('', view.enterMarks), path('MarkSheet', view.getMarks,
name='MarkSheet')]
<|reserved_special_token_1|>
from django.contrib import admin
from django.urls import path
from . import view
urlpatterns = [
path('', view.enterMarks),
path('MarkSheet', view.getMarks, name='MarkSheet'),
]
|
flexible
|
{
"blob_id": "511c555c88fb646b7b87678044b43a5a623a5ac7",
"index": 4670,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', view.enterMarks), path('MarkSheet', view.getMarks,\n name='MarkSheet')]\n",
"step-3": "from django.contrib import admin\nfrom django.urls import path\nfrom . import view\nurlpatterns = [path('', view.enterMarks), path('MarkSheet', view.getMarks,\n name='MarkSheet')]\n",
"step-4": "\nfrom django.contrib import admin\nfrom django.urls import path\nfrom . import view\n\nurlpatterns = [\n path('', view.enterMarks),\n path('MarkSheet', view.getMarks, name='MarkSheet'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Stores custom FASTA sequences under a uuid in the database.
Part of the tables used for custom jobs.
"""
import uuid
from pred.webserver.errors import ClientException, ErrorType, raise_on_too_big_uploaded_data
from pred.queries.dbutil import update_database, read_database
from Bio import SeqIO
from io import StringIO
class SequenceList(object):
"""
CRUD for managing FASTA file contents in the database.
"""
def __init__(self, seq_uuid):
"""
Setup sequence list with primary key seq_uuid.
:param seq_uuid: str: uuid that uniquely represents this list.
"""
if not seq_uuid:
raise ValueError("SequenceList uuid must have a value yours:'{}'.".format(seq_uuid))
self.seq_uuid = seq_uuid
self.content = None
self.created = None
self.title = None
def insert(self, db):
"""
Save self.contents to the database under self.seq_uuid.
:param db: database connection
"""
if not self.content:
raise ValueError("SequenceList content property must be filled in before calling save.")
if not self.title:
raise ValueError("SequenceList title property must be filled in before calling save.")
seq_item_list = SequenceListItems(self.content)
cur = db.cursor()
self._insert_data(cur, seq_item_list, self.title)
cur.close()
db.commit()
def _insert_data(self, cur, item_list, title):
cur.execute("insert into sequence_list(id, data, title) values(%s, %s, %s)",
[self.seq_uuid, item_list.data, title])
for item in item_list.items:
cur.execute("insert into sequence_list_item(seq_id, idx, name, sequence) values(%s, %s, %s, %s)",
[self.seq_uuid, item['idx'], item['name'], item['sequence']])
def load(self, db):
"""
Load self.contents from the database based on self.seq_uuid.
:param db: database connection
"""
rows = read_database(db, "select data, created, title from sequence_list where id = %s", [self.seq_uuid])
if not rows:
raise KeyError("Unable to find sequence for {}".format(self.seq_uuid))
first_row = rows[0]
self.content = first_row[0]
self.created = first_row[1]
self.title = first_row[2]
@staticmethod
def create_with_content_and_title(db, content, title):
"""
Saves content into the database under a new uuid.
:param db: database connection
:param content: str: FASTA file data to save in the database
:return: str: new uuid created for this content
"""
sequence_list = SequenceList(str(uuid.uuid1()))
sequence_list.content = content
sequence_list.title = title
sequence_list.insert(db)
return sequence_list.seq_uuid
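    # Illustrative usage sketch (added comment, not part of the original module):
    # with `db` an open DB-API connection (e.g. a psycopg2 connection, given the
    # %s placeholders used above), a caller might do:
    #   seq_id = SequenceList.create_with_content_and_title(db, ">seq1\nACGT", "My sequences")
    #   stored = SequenceList.read_list(db, seq_id)
    # The FASTA string and title here are made-up example values.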
@staticmethod
def read_list(db, seq_uuid):
"""
Lookup the content from the database via the seq_uuid provided.
:param db: database connection
:param seq_uuid: str: uuid to lookup
:return: str: FASTA file data associated with the seq_uuid
"""
sequence_list = SequenceList(seq_uuid)
sequence_list.load(db)
return sequence_list
@staticmethod
def delete_old_and_unattached(cur, hours):
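        """Delete sequence lists (and their items) that are not referenced by any job
        and are older than the given number of hours.
        :param cur: open database cursor
        :param hours: int: age cutoff in hours
        :return: list: always empty as currently written
        """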
result = []
select_sql = "select sequence_list.id from sequence_list " \
" left outer join job on sequence_list.id = job.seq_id " \
" where job.id is null " \
" and CURRENT_TIMESTAMP - sequence_list.created > interval '{} hours'".format(hours)
cur.execute(select_sql, [])
for row in cur.fetchall():
seq_id = row[0]
cur.execute("delete from sequence_list_item where seq_id = %s", [seq_id])
cur.execute("delete from sequence_list where id = %s", [seq_id])
return result
class SequenceListItems(object):
"""
Record per sequence name in SequenceList.
Used to lookup sequence for results.
"""
def __init__(self, data):
raise_on_too_big_uploaded_data(data)
self.data = SequenceListItems.make_fasta(data.strip())
self.items = SequenceListItems.find_sequence_items(self.data)
@staticmethod
def make_fasta(data):
"""
Convert string to FASTA if necessary.
:param data: str: input value either FASTA or newline separated sequences
:return: str: FASTA data
"""
result = data
if not data.startswith(">"):
result = ""
cnt = 1
for line in data.split('\n'):
if line:
result += ">seq{}\n".format(cnt)
result += line
result += "\n"
cnt += 1
return result.strip()
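    # Illustrative sketch (added example): plain newline-separated sequences gain
    # auto-numbered headers, e.g. make_fasta("ACGT\nTTGG") returns
    # ">seq1\nACGT\n>seq2\nTTGG"; input that already starts with ">" is kept as-is.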
@staticmethod
def find_sequence_items(data):
"""
Parse FASTA data and return a list of {idx, name, sequence}.
:param data: str: FASTA data to parse
:return: [dict]: sequences in the FASTA data
"""
results = []
cnt = 1
seqs = SeqIO.parse(StringIO(data), 'fasta')
for seq in seqs:
results.append({
'idx': cnt,
'name': seq.name,
'sequence': str(seq.seq)
})
cnt += 1
SequenceListItems.verify_unique_names(results)
return results
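    # Illustrative sketch (added example): for the made-up record ">seq1\nACGT",
    # find_sequence_items returns [{'idx': 1, 'name': 'seq1', 'sequence': 'ACGT'}]
    # (Biopython's SeqIO uses the text after ">" up to the first space as the name).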
@staticmethod
def verify_unique_names(items):
"""
Make sure that we don't have any duplicate names in the list.
        Raises ClientException if the names are duplicated.
:param items: [{}]: list of dictionaries with name property to check
"""
unique_names = set([item['name'] for item in items])
if len(unique_names) != len(items):
raise ClientException("Error: Duplicate sequence names found.", ErrorType.INVALID_SEQUENCE_DATA)
|
normal
|
{
"blob_id": "2e744c0cbddf64a9c538c9f33fa19ff78c515012",
"index": 6797,
"step-1": "<mask token>\n\n\nclass SequenceList(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def delete_old_and_unattached(cur, hours):\n result = []\n select_sql = (\n \"select sequence_list.id from sequence_list left outer join job on sequence_list.id = job.seq_id where job.id is null and CURRENT_TIMESTAMP - sequence_list.created > interval '{} hours'\"\n .format(hours))\n cur.execute(select_sql, [])\n for row in cur.fetchall():\n seq_id = row[0]\n cur.execute('delete from sequence_list_item where seq_id = %s',\n [seq_id])\n cur.execute('delete from sequence_list where id = %s', [seq_id])\n return result\n\n\nclass SequenceListItems(object):\n \"\"\"\n Record per sequence name in SequenceList.\n Used to lookup sequence for results.\n \"\"\"\n\n def __init__(self, data):\n raise_on_too_big_uploaded_data(data)\n self.data = SequenceListItems.make_fasta(data.strip())\n self.items = SequenceListItems.find_sequence_items(self.data)\n\n @staticmethod\n def make_fasta(data):\n \"\"\"\n Convert string to FASTA if necessary.\n :param data: str: input value either FASTA or newline separated sequences\n :return: str: FASTA data\n \"\"\"\n result = data\n if not data.startswith('>'):\n result = ''\n cnt = 1\n for line in data.split('\\n'):\n if line:\n result += '>seq{}\\n'.format(cnt)\n result += line\n result += '\\n'\n cnt += 1\n return result.strip()\n\n @staticmethod\n def find_sequence_items(data):\n \"\"\"\n Parse FASTA data and return a list of {idx, name, sequence}.\n :param data: str: FASTA data to parse\n :return: [dict]: sequences in the FASTA data\n \"\"\"\n results = []\n cnt = 1\n seqs = SeqIO.parse(StringIO(data), 'fasta')\n for seq in seqs:\n results.append({'idx': cnt, 'name': seq.name, 'sequence': str(\n seq.seq)})\n cnt += 1\n SequenceListItems.verify_unique_names(results)\n return results\n\n @staticmethod\n def verify_unique_names(items):\n \"\"\"\n Make sure that we don't have any duplicate names in the list.\n Raises UserFacingException if the names are duplicated.\n :param items: [{}]: list of dictionaries with name property to check\n \"\"\"\n unique_names = set([item['name'] for item in items])\n if len(unique_names) != len(items):\n raise ClientException('Error: Duplicate sequence names found.',\n ErrorType.INVALID_SEQUENCE_DATA)\n",
"step-2": "<mask token>\n\n\nclass SequenceList(object):\n <mask token>\n\n def __init__(self, seq_uuid):\n \"\"\"\n Setup sequence list with primary key seq_uuid.\n :param seq_uuid: str: uuid that uniquely represents this list.\n \"\"\"\n if not seq_uuid:\n raise ValueError(\"SequenceList uuid must have a value yours:'{}'.\"\n .format(seq_uuid))\n self.seq_uuid = seq_uuid\n self.content = None\n self.created = None\n self.title = None\n\n def insert(self, db):\n \"\"\"\n Save self.contents to the database under self.seq_uuid.\n :param db: database connection\n \"\"\"\n if not self.content:\n raise ValueError(\n 'SequenceList content property must be filled in before calling save.'\n )\n if not self.title:\n raise ValueError(\n 'SequenceList title property must be filled in before calling save.'\n )\n seq_item_list = SequenceListItems(self.content)\n cur = db.cursor()\n self._insert_data(cur, seq_item_list, self.title)\n cur.close()\n db.commit()\n\n def _insert_data(self, cur, item_list, title):\n cur.execute(\n 'insert into sequence_list(id, data, title) values(%s, %s, %s)',\n [self.seq_uuid, item_list.data, title])\n for item in item_list.items:\n cur.execute(\n 'insert into sequence_list_item(seq_id, idx, name, sequence) values(%s, %s, %s, %s)'\n , [self.seq_uuid, item['idx'], item['name'], item['sequence']])\n\n def load(self, db):\n \"\"\"\n Load self.contents from the database based on self.seq_uuid.\n :param db: database connection\n \"\"\"\n rows = read_database(db,\n 'select data, created, title from sequence_list where id = %s',\n [self.seq_uuid])\n if not rows:\n raise KeyError('Unable to find sequence for {}'.format(self.\n seq_uuid))\n first_row = rows[0]\n self.content = first_row[0]\n self.created = first_row[1]\n self.title = first_row[2]\n\n @staticmethod\n def create_with_content_and_title(db, content, title):\n \"\"\"\n Saves content into the database under a new uuid.\n :param db: database connection\n :param content: str: FASTA file data to save in the database\n :return: str: new uuid created for this content\n \"\"\"\n sequence_list = SequenceList(str(uuid.uuid1()))\n sequence_list.content = content\n sequence_list.title = title\n sequence_list.insert(db)\n return sequence_list.seq_uuid\n\n @staticmethod\n def read_list(db, seq_uuid):\n \"\"\"\n Lookup the content from the database via the seq_uuid provided.\n :param db: database connection\n :param seq_uuid: str: uuid to lookup\n :return: str: FASTA file data associated with the seq_uuid\n \"\"\"\n sequence_list = SequenceList(seq_uuid)\n sequence_list.load(db)\n return sequence_list\n\n @staticmethod\n def delete_old_and_unattached(cur, hours):\n result = []\n select_sql = (\n \"select sequence_list.id from sequence_list left outer join job on sequence_list.id = job.seq_id where job.id is null and CURRENT_TIMESTAMP - sequence_list.created > interval '{} hours'\"\n .format(hours))\n cur.execute(select_sql, [])\n for row in cur.fetchall():\n seq_id = row[0]\n cur.execute('delete from sequence_list_item where seq_id = %s',\n [seq_id])\n cur.execute('delete from sequence_list where id = %s', [seq_id])\n return result\n\n\nclass SequenceListItems(object):\n \"\"\"\n Record per sequence name in SequenceList.\n Used to lookup sequence for results.\n \"\"\"\n\n def __init__(self, data):\n raise_on_too_big_uploaded_data(data)\n self.data = SequenceListItems.make_fasta(data.strip())\n self.items = SequenceListItems.find_sequence_items(self.data)\n\n @staticmethod\n def make_fasta(data):\n \"\"\"\n Convert string to FASTA if 
necessary.\n :param data: str: input value either FASTA or newline separated sequences\n :return: str: FASTA data\n \"\"\"\n result = data\n if not data.startswith('>'):\n result = ''\n cnt = 1\n for line in data.split('\\n'):\n if line:\n result += '>seq{}\\n'.format(cnt)\n result += line\n result += '\\n'\n cnt += 1\n return result.strip()\n\n @staticmethod\n def find_sequence_items(data):\n \"\"\"\n Parse FASTA data and return a list of {idx, name, sequence}.\n :param data: str: FASTA data to parse\n :return: [dict]: sequences in the FASTA data\n \"\"\"\n results = []\n cnt = 1\n seqs = SeqIO.parse(StringIO(data), 'fasta')\n for seq in seqs:\n results.append({'idx': cnt, 'name': seq.name, 'sequence': str(\n seq.seq)})\n cnt += 1\n SequenceListItems.verify_unique_names(results)\n return results\n\n @staticmethod\n def verify_unique_names(items):\n \"\"\"\n Make sure that we don't have any duplicate names in the list.\n Raises UserFacingException if the names are duplicated.\n :param items: [{}]: list of dictionaries with name property to check\n \"\"\"\n unique_names = set([item['name'] for item in items])\n if len(unique_names) != len(items):\n raise ClientException('Error: Duplicate sequence names found.',\n ErrorType.INVALID_SEQUENCE_DATA)\n",
"step-3": "<mask token>\n\n\nclass SequenceList(object):\n \"\"\"\n CRUD for managing FASTA file contents in the database.\n \"\"\"\n\n def __init__(self, seq_uuid):\n \"\"\"\n Setup sequence list with primary key seq_uuid.\n :param seq_uuid: str: uuid that uniquely represents this list.\n \"\"\"\n if not seq_uuid:\n raise ValueError(\"SequenceList uuid must have a value yours:'{}'.\"\n .format(seq_uuid))\n self.seq_uuid = seq_uuid\n self.content = None\n self.created = None\n self.title = None\n\n def insert(self, db):\n \"\"\"\n Save self.contents to the database under self.seq_uuid.\n :param db: database connection\n \"\"\"\n if not self.content:\n raise ValueError(\n 'SequenceList content property must be filled in before calling save.'\n )\n if not self.title:\n raise ValueError(\n 'SequenceList title property must be filled in before calling save.'\n )\n seq_item_list = SequenceListItems(self.content)\n cur = db.cursor()\n self._insert_data(cur, seq_item_list, self.title)\n cur.close()\n db.commit()\n\n def _insert_data(self, cur, item_list, title):\n cur.execute(\n 'insert into sequence_list(id, data, title) values(%s, %s, %s)',\n [self.seq_uuid, item_list.data, title])\n for item in item_list.items:\n cur.execute(\n 'insert into sequence_list_item(seq_id, idx, name, sequence) values(%s, %s, %s, %s)'\n , [self.seq_uuid, item['idx'], item['name'], item['sequence']])\n\n def load(self, db):\n \"\"\"\n Load self.contents from the database based on self.seq_uuid.\n :param db: database connection\n \"\"\"\n rows = read_database(db,\n 'select data, created, title from sequence_list where id = %s',\n [self.seq_uuid])\n if not rows:\n raise KeyError('Unable to find sequence for {}'.format(self.\n seq_uuid))\n first_row = rows[0]\n self.content = first_row[0]\n self.created = first_row[1]\n self.title = first_row[2]\n\n @staticmethod\n def create_with_content_and_title(db, content, title):\n \"\"\"\n Saves content into the database under a new uuid.\n :param db: database connection\n :param content: str: FASTA file data to save in the database\n :return: str: new uuid created for this content\n \"\"\"\n sequence_list = SequenceList(str(uuid.uuid1()))\n sequence_list.content = content\n sequence_list.title = title\n sequence_list.insert(db)\n return sequence_list.seq_uuid\n\n @staticmethod\n def read_list(db, seq_uuid):\n \"\"\"\n Lookup the content from the database via the seq_uuid provided.\n :param db: database connection\n :param seq_uuid: str: uuid to lookup\n :return: str: FASTA file data associated with the seq_uuid\n \"\"\"\n sequence_list = SequenceList(seq_uuid)\n sequence_list.load(db)\n return sequence_list\n\n @staticmethod\n def delete_old_and_unattached(cur, hours):\n result = []\n select_sql = (\n \"select sequence_list.id from sequence_list left outer join job on sequence_list.id = job.seq_id where job.id is null and CURRENT_TIMESTAMP - sequence_list.created > interval '{} hours'\"\n .format(hours))\n cur.execute(select_sql, [])\n for row in cur.fetchall():\n seq_id = row[0]\n cur.execute('delete from sequence_list_item where seq_id = %s',\n [seq_id])\n cur.execute('delete from sequence_list where id = %s', [seq_id])\n return result\n\n\nclass SequenceListItems(object):\n \"\"\"\n Record per sequence name in SequenceList.\n Used to lookup sequence for results.\n \"\"\"\n\n def __init__(self, data):\n raise_on_too_big_uploaded_data(data)\n self.data = SequenceListItems.make_fasta(data.strip())\n self.items = SequenceListItems.find_sequence_items(self.data)\n\n @staticmethod\n 
def make_fasta(data):\n \"\"\"\n Convert string to FASTA if necessary.\n :param data: str: input value either FASTA or newline separated sequences\n :return: str: FASTA data\n \"\"\"\n result = data\n if not data.startswith('>'):\n result = ''\n cnt = 1\n for line in data.split('\\n'):\n if line:\n result += '>seq{}\\n'.format(cnt)\n result += line\n result += '\\n'\n cnt += 1\n return result.strip()\n\n @staticmethod\n def find_sequence_items(data):\n \"\"\"\n Parse FASTA data and return a list of {idx, name, sequence}.\n :param data: str: FASTA data to parse\n :return: [dict]: sequences in the FASTA data\n \"\"\"\n results = []\n cnt = 1\n seqs = SeqIO.parse(StringIO(data), 'fasta')\n for seq in seqs:\n results.append({'idx': cnt, 'name': seq.name, 'sequence': str(\n seq.seq)})\n cnt += 1\n SequenceListItems.verify_unique_names(results)\n return results\n\n @staticmethod\n def verify_unique_names(items):\n \"\"\"\n Make sure that we don't have any duplicate names in the list.\n Raises UserFacingException if the names are duplicated.\n :param items: [{}]: list of dictionaries with name property to check\n \"\"\"\n unique_names = set([item['name'] for item in items])\n if len(unique_names) != len(items):\n raise ClientException('Error: Duplicate sequence names found.',\n ErrorType.INVALID_SEQUENCE_DATA)\n",
"step-4": "<mask token>\nimport uuid\nfrom pred.webserver.errors import ClientException, ErrorType, raise_on_too_big_uploaded_data\nfrom pred.queries.dbutil import update_database, read_database\nfrom Bio import SeqIO\nfrom io import StringIO\n\n\nclass SequenceList(object):\n \"\"\"\n CRUD for managing FASTA file contents in the database.\n \"\"\"\n\n def __init__(self, seq_uuid):\n \"\"\"\n Setup sequence list with primary key seq_uuid.\n :param seq_uuid: str: uuid that uniquely represents this list.\n \"\"\"\n if not seq_uuid:\n raise ValueError(\"SequenceList uuid must have a value yours:'{}'.\"\n .format(seq_uuid))\n self.seq_uuid = seq_uuid\n self.content = None\n self.created = None\n self.title = None\n\n def insert(self, db):\n \"\"\"\n Save self.contents to the database under self.seq_uuid.\n :param db: database connection\n \"\"\"\n if not self.content:\n raise ValueError(\n 'SequenceList content property must be filled in before calling save.'\n )\n if not self.title:\n raise ValueError(\n 'SequenceList title property must be filled in before calling save.'\n )\n seq_item_list = SequenceListItems(self.content)\n cur = db.cursor()\n self._insert_data(cur, seq_item_list, self.title)\n cur.close()\n db.commit()\n\n def _insert_data(self, cur, item_list, title):\n cur.execute(\n 'insert into sequence_list(id, data, title) values(%s, %s, %s)',\n [self.seq_uuid, item_list.data, title])\n for item in item_list.items:\n cur.execute(\n 'insert into sequence_list_item(seq_id, idx, name, sequence) values(%s, %s, %s, %s)'\n , [self.seq_uuid, item['idx'], item['name'], item['sequence']])\n\n def load(self, db):\n \"\"\"\n Load self.contents from the database based on self.seq_uuid.\n :param db: database connection\n \"\"\"\n rows = read_database(db,\n 'select data, created, title from sequence_list where id = %s',\n [self.seq_uuid])\n if not rows:\n raise KeyError('Unable to find sequence for {}'.format(self.\n seq_uuid))\n first_row = rows[0]\n self.content = first_row[0]\n self.created = first_row[1]\n self.title = first_row[2]\n\n @staticmethod\n def create_with_content_and_title(db, content, title):\n \"\"\"\n Saves content into the database under a new uuid.\n :param db: database connection\n :param content: str: FASTA file data to save in the database\n :return: str: new uuid created for this content\n \"\"\"\n sequence_list = SequenceList(str(uuid.uuid1()))\n sequence_list.content = content\n sequence_list.title = title\n sequence_list.insert(db)\n return sequence_list.seq_uuid\n\n @staticmethod\n def read_list(db, seq_uuid):\n \"\"\"\n Lookup the content from the database via the seq_uuid provided.\n :param db: database connection\n :param seq_uuid: str: uuid to lookup\n :return: str: FASTA file data associated with the seq_uuid\n \"\"\"\n sequence_list = SequenceList(seq_uuid)\n sequence_list.load(db)\n return sequence_list\n\n @staticmethod\n def delete_old_and_unattached(cur, hours):\n result = []\n select_sql = (\n \"select sequence_list.id from sequence_list left outer join job on sequence_list.id = job.seq_id where job.id is null and CURRENT_TIMESTAMP - sequence_list.created > interval '{} hours'\"\n .format(hours))\n cur.execute(select_sql, [])\n for row in cur.fetchall():\n seq_id = row[0]\n cur.execute('delete from sequence_list_item where seq_id = %s',\n [seq_id])\n cur.execute('delete from sequence_list where id = %s', [seq_id])\n return result\n\n\nclass SequenceListItems(object):\n \"\"\"\n Record per sequence name in SequenceList.\n Used to lookup sequence for 
results.\n \"\"\"\n\n def __init__(self, data):\n raise_on_too_big_uploaded_data(data)\n self.data = SequenceListItems.make_fasta(data.strip())\n self.items = SequenceListItems.find_sequence_items(self.data)\n\n @staticmethod\n def make_fasta(data):\n \"\"\"\n Convert string to FASTA if necessary.\n :param data: str: input value either FASTA or newline separated sequences\n :return: str: FASTA data\n \"\"\"\n result = data\n if not data.startswith('>'):\n result = ''\n cnt = 1\n for line in data.split('\\n'):\n if line:\n result += '>seq{}\\n'.format(cnt)\n result += line\n result += '\\n'\n cnt += 1\n return result.strip()\n\n @staticmethod\n def find_sequence_items(data):\n \"\"\"\n Parse FASTA data and return a list of {idx, name, sequence}.\n :param data: str: FASTA data to parse\n :return: [dict]: sequences in the FASTA data\n \"\"\"\n results = []\n cnt = 1\n seqs = SeqIO.parse(StringIO(data), 'fasta')\n for seq in seqs:\n results.append({'idx': cnt, 'name': seq.name, 'sequence': str(\n seq.seq)})\n cnt += 1\n SequenceListItems.verify_unique_names(results)\n return results\n\n @staticmethod\n def verify_unique_names(items):\n \"\"\"\n Make sure that we don't have any duplicate names in the list.\n Raises UserFacingException if the names are duplicated.\n :param items: [{}]: list of dictionaries with name property to check\n \"\"\"\n unique_names = set([item['name'] for item in items])\n if len(unique_names) != len(items):\n raise ClientException('Error: Duplicate sequence names found.',\n ErrorType.INVALID_SEQUENCE_DATA)\n",
"step-5": "\"\"\"\nStores custom FASTA sequences under a uuid in the database.\nPart of the tables used for custom jobs.\n\"\"\"\nimport uuid\nfrom pred.webserver.errors import ClientException, ErrorType, raise_on_too_big_uploaded_data\nfrom pred.queries.dbutil import update_database, read_database\nfrom Bio import SeqIO\nfrom io import StringIO\n\nclass SequenceList(object):\n \"\"\"\n CRUD for managing FASTA file contents in the database.\n \"\"\"\n def __init__(self, seq_uuid):\n \"\"\"\n Setup sequence list with primary key seq_uuid.\n :param seq_uuid: str: uuid that uniquely represents this list.\n \"\"\"\n if not seq_uuid:\n raise ValueError(\"SequenceList uuid must have a value yours:'{}'.\".format(seq_uuid))\n self.seq_uuid = seq_uuid\n self.content = None\n self.created = None\n self.title = None\n\n def insert(self, db):\n \"\"\"\n Save self.contents to the database under self.seq_uuid.\n :param db: database connection\n \"\"\"\n if not self.content:\n raise ValueError(\"SequenceList content property must be filled in before calling save.\")\n if not self.title:\n raise ValueError(\"SequenceList title property must be filled in before calling save.\")\n seq_item_list = SequenceListItems(self.content)\n cur = db.cursor()\n self._insert_data(cur, seq_item_list, self.title)\n cur.close()\n db.commit()\n\n def _insert_data(self, cur, item_list, title):\n cur.execute(\"insert into sequence_list(id, data, title) values(%s, %s, %s)\",\n [self.seq_uuid, item_list.data, title])\n for item in item_list.items:\n cur.execute(\"insert into sequence_list_item(seq_id, idx, name, sequence) values(%s, %s, %s, %s)\",\n [self.seq_uuid, item['idx'], item['name'], item['sequence']])\n\n def load(self, db):\n \"\"\"\n Load self.contents from the database based on self.seq_uuid.\n :param db: database connection\n \"\"\"\n rows = read_database(db, \"select data, created, title from sequence_list where id = %s\", [self.seq_uuid])\n if not rows:\n raise KeyError(\"Unable to find sequence for {}\".format(self.seq_uuid))\n first_row = rows[0]\n self.content = first_row[0]\n self.created = first_row[1]\n self.title = first_row[2]\n\n @staticmethod\n def create_with_content_and_title(db, content, title):\n \"\"\"\n Saves content into the database under a new uuid.\n :param db: database connection\n :param content: str: FASTA file data to save in the database\n :return: str: new uuid created for this content\n \"\"\"\n sequence_list = SequenceList(str(uuid.uuid1()))\n sequence_list.content = content\n sequence_list.title = title\n sequence_list.insert(db)\n return sequence_list.seq_uuid\n\n @staticmethod\n def read_list(db, seq_uuid):\n \"\"\"\n Lookup the content from the database via the seq_uuid provided.\n :param db: database connection\n :param seq_uuid: str: uuid to lookup\n :return: str: FASTA file data associated with the seq_uuid\n \"\"\"\n sequence_list = SequenceList(seq_uuid)\n sequence_list.load(db)\n return sequence_list\n\n @staticmethod\n def delete_old_and_unattached(cur, hours):\n result = []\n select_sql = \"select sequence_list.id from sequence_list \" \\\n \" left outer join job on sequence_list.id = job.seq_id \" \\\n \" where job.id is null \" \\\n \" and CURRENT_TIMESTAMP - sequence_list.created > interval '{} hours'\".format(hours)\n cur.execute(select_sql, [])\n for row in cur.fetchall():\n seq_id = row[0]\n cur.execute(\"delete from sequence_list_item where seq_id = %s\", [seq_id])\n cur.execute(\"delete from sequence_list where id = %s\", [seq_id])\n return result\n\n\nclass 
SequenceListItems(object):\n \"\"\"\n Record per sequence name in SequenceList.\n Used to lookup sequence for results.\n \"\"\"\n def __init__(self, data):\n raise_on_too_big_uploaded_data(data)\n self.data = SequenceListItems.make_fasta(data.strip())\n self.items = SequenceListItems.find_sequence_items(self.data)\n\n @staticmethod\n def make_fasta(data):\n \"\"\"\n Convert string to FASTA if necessary.\n :param data: str: input value either FASTA or newline separated sequences\n :return: str: FASTA data\n \"\"\"\n result = data\n if not data.startswith(\">\"):\n result = \"\"\n cnt = 1\n for line in data.split('\\n'):\n if line:\n result += \">seq{}\\n\".format(cnt)\n result += line\n result += \"\\n\"\n cnt += 1\n return result.strip()\n\n @staticmethod\n def find_sequence_items(data):\n \"\"\"\n Parse FASTA data and return a list of {idx, name, sequence}.\n :param data: str: FASTA data to parse\n :return: [dict]: sequences in the FASTA data\n \"\"\"\n results = []\n cnt = 1\n seqs = SeqIO.parse(StringIO(data), 'fasta')\n for seq in seqs:\n results.append({\n 'idx': cnt,\n 'name': seq.name,\n 'sequence': str(seq.seq)\n })\n cnt += 1\n SequenceListItems.verify_unique_names(results)\n return results\n\n @staticmethod\n def verify_unique_names(items):\n \"\"\"\n Make sure that we don't have any duplicate names in the list.\n Raises UserFacingException if the names are duplicated.\n :param items: [{}]: list of dictionaries with name property to check\n \"\"\"\n unique_names = set([item['name'] for item in items])\n if len(unique_names) != len(items):\n raise ClientException(\"Error: Duplicate sequence names found.\", ErrorType.INVALID_SEQUENCE_DATA)\n",
"step-ids": [
8,
14,
15,
16,
17
]
}
|
[
8,
14,
15,
16,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(1, int(input()) + 1):
j = 1
while j < i:
print(j, end='')
j += 1
while i > 0:
print(i, end='')
i -= 1
print()
<|reserved_special_token_1|>
#!/bin/env python3
"""
https://www.hackerrank.com/challenges/triangle-quest-2
INPUT:
integer N
where 0 < N < 10
OUTPUT:
print palindromic triangle of size N
e.g.for N=5
1
121
12321
1234321
123454321
"""
for i in range(1, int(input()) + 1):
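    # print 1 .. i-1 ascending, then i .. 1 descending: each row is a palindrome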
j = 1
while j < i:
print(j,end='')
j += 1
while i > 0:
print(i,end='')
i -= 1
print()
|
flexible
|
{
"blob_id": "94cbd9554e3326897147dc417d9fc8f91974786a",
"index": 5098,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, int(input()) + 1):\n j = 1\n while j < i:\n print(j, end='')\n j += 1\n while i > 0:\n print(i, end='')\n i -= 1\n print()\n",
"step-3": "#!/bin/env python3\n\"\"\"\nhttps://www.hackerrank.com/challenges/triangle-quest-2\n\nINPUT:\n integer N\n where 0 < N < 10\n\nOUTPUT:\n print palindromic triangle of size N\n\n e.g.for N=5\n1\n121\n12321\n1234321\n123454321\n\n\"\"\"\nfor i in range(1, int(input()) + 1):\n j = 1\n while j < i:\n print(j,end='')\n j += 1\n\n while i > 0:\n print(i,end='')\n i -= 1\n print()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
add(2, 2)
sub(2, 3)
<|reserved_special_token_1|>
from package.pack import *
add(2, 2)
sub(2, 3)
|
flexible
|
{
"blob_id": "9583a97ae4b1fbf5ecdf33d848b13bf0b28d2eb4",
"index": 2452,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadd(2, 2)\nsub(2, 3)\n",
"step-3": "from package.pack import *\nadd(2, 2)\nsub(2, 3)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Generated by Django 3.0.4 on 2020-03-11 17:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20200310_1620'),
]
operations = [
migrations.AddField(
model_name='tag',
name='name',
field=models.CharField(choices=[('METHOD', 'METHOD'), ('FUNCTION', 'FUNCTION'), ('OPERATOR', 'OPERATOR'), ('HELPER FUNCTION', 'HELPER FUNCTION')], default='code', max_length=100),
),
migrations.AddField(
model_name='tag',
name='slug',
field=models.CharField(default='code', max_length=100, unique=True),
),
]
|
normal
|
{
"blob_id": "ab12468b1da20c896e3578091fd9ba245dcfa0a4",
"index": 1350,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0003_auto_20200310_1620')]\n operations = [migrations.AddField(model_name='tag', name='name', field=\n models.CharField(choices=[('METHOD', 'METHOD'), ('FUNCTION',\n 'FUNCTION'), ('OPERATOR', 'OPERATOR'), ('HELPER FUNCTION',\n 'HELPER FUNCTION')], default='code', max_length=100)), migrations.\n AddField(model_name='tag', name='slug', field=models.CharField(\n default='code', max_length=100, unique=True))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0003_auto_20200310_1620')]\n operations = [migrations.AddField(model_name='tag', name='name', field=\n models.CharField(choices=[('METHOD', 'METHOD'), ('FUNCTION',\n 'FUNCTION'), ('OPERATOR', 'OPERATOR'), ('HELPER FUNCTION',\n 'HELPER FUNCTION')], default='code', max_length=100)), migrations.\n AddField(model_name='tag', name='slug', field=models.CharField(\n default='code', max_length=100, unique=True))]\n",
"step-5": "# Generated by Django 3.0.4 on 2020-03-11 17:48\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0003_auto_20200310_1620'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='tag',\n name='name',\n field=models.CharField(choices=[('METHOD', 'METHOD'), ('FUNCTION', 'FUNCTION'), ('OPERATOR', 'OPERATOR'), ('HELPER FUNCTION', 'HELPER FUNCTION')], default='code', max_length=100),\n ),\n migrations.AddField(\n model_name='tag',\n name='slug',\n field=models.CharField(default='code', max_length=100, unique=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(int(c))
<|reserved_special_token_1|>
h = int(input())
a = int(input())
b = int(input())
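# c = ceil((h - b) / (a - b)), computed with integer arithmetic (assumes a > b)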
c = (h - b + a - b - 1) // (a - b)
print(int(c))
|
flexible
|
{
"blob_id": "eea962d6c519bee802c346fcf8d0c7410e00c30b",
"index": 9587,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(int(c))\n",
"step-3": "h = int(input())\na = int(input())\nb = int(input())\nc = (h - b + a - b - 1) // (a - b)\nprint(int(c))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def rectangular(f, a, b, n):
h = float(b - a) / n
result = f(a + 0.5 * h)
for i in range(1, n):
result += f(a + 0.5 * h + i * h)
result *= h
return result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def trapezoidal(f, a, b, n):
h = float(b - a) / n
result = 0.5 * (f(a) + f(b))
for i in range(1, n):
result += f(a + i * h)
result *= h
return result
def rectangular(f, a, b, n):
h = float(b - a) / n
result = f(a + 0.5 * h)
for i in range(1, n):
result += f(a + 0.5 * h + i * h)
result *= h
return result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def trapezoidal(f, a, b, n):
h = float(b - a) / n
result = 0.5 * (f(a) + f(b))
for i in range(1, n):
result += f(a + i * h)
result *= h
return result
def rectangular(f, a, b, n):
h = float(b - a) / n
result = f(a + 0.5 * h)
for i in range(1, n):
result += f(a + 0.5 * h + i * h)
result *= h
return result
<|reserved_special_token_0|>
print('Exact value of the integral: {}\n'.format(manual_calc))
print("""Trapezoidal approximation:
 2 trapezoids: {}
 100 trapezoids: {}""".
    format(trap_2, trap_100))
print(
    """Error of the trapezoidal approximation:
 2 trapezoids: {}
 100 trapezoids: {}
"""
    .format(abs(trap_2 - manual_calc), abs(trap_100 - manual_calc)))
print(
    """Rectangular approximation:
 2 rectangles: {}
 100 rectangles: {}"""
    .format(rect_2, rect_100))
print(
    """Error of the rectangular approximation:
 2 rectangles: {}
 100 rectangles: {}"""
    .format(abs(rect_2 - manual_calc), abs(rect_100 - manual_calc)))
<|reserved_special_token_1|>
manual_calc = 53 + 1.0 / 3
def trapezoidal(f, a, b, n):
h = float(b - a) / n
result = 0.5 * (f(a) + f(b))
for i in range(1, n):
result += f(a + i * h)
result *= h
return result
def rectangular(f, a, b, n):
h = float(b - a) / n
result = f(a + 0.5 * h)
for i in range(1, n):
result += f(a + 0.5 * h + i * h)
result *= h
return result
trap_2 = trapezoidal(lambda x: x * (x - 1), 2, 6, 2)
trap_100 = trapezoidal(lambda x: x * (x - 1), 2, 6, 100)
rect_2 = rectangular(lambda x: x * (x - 1), 2, 6, 2)
rect_100 = rectangular(lambda x: x * (x - 1), 2, 6, 100)
print('Exact value of the integral: {}\n'.format(manual_calc))
print("""Trapezoidal approximation:
 2 trapezoids: {}
 100 trapezoids: {}""".
    format(trap_2, trap_100))
print(
    """Error of the trapezoidal approximation:
 2 trapezoids: {}
 100 trapezoids: {}
"""
    .format(abs(trap_2 - manual_calc), abs(trap_100 - manual_calc)))
print(
    """Rectangular approximation:
 2 rectangles: {}
 100 rectangles: {}"""
    .format(rect_2, rect_100))
print(
    """Error of the rectangular approximation:
 2 rectangles: {}
 100 rectangles: {}"""
    .format(abs(rect_2 - manual_calc), abs(rect_100 - manual_calc)))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# exercise completed in Python 3
manual_calc = 53 + 1.0/3
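# 53 + 1/3 is the exact value of the integral of x*(x - 1) over [2, 6], worked out by hand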
def trapezoidal(f, a, b, n):
h = float(b - a)/n
result = 0.5*(f(a) + f(b))
for i in range(1, n):
result += f(a + i*h)
result *= h
return result
def rectangular(f, a, b, n):
h = float(b - a)/n
result = f(a+0.5*h)
for i in range(1, n):
result += f(a + 0.5*h + i*h)
result *= h
return result
trap_2 = trapezoidal(lambda x: x * (x - 1), 2, 6, 2)
trap_100 = trapezoidal(lambda x: x * (x - 1), 2, 6, 100)
rect_2 = rectangular(lambda x: x * (x - 1), 2, 6, 2)
rect_100 = rectangular(lambda x: x * (x - 1), 2, 6, 100)
print('Exact value of the integral: {}\n'.format(manual_calc))
print('Trapezoidal approximation:\n 2 trapezoids: {}\n 100 trapezoids: {}'
    .format(trap_2, trap_100))
print('Error of the trapezoidal approximation:\n 2 trapezoids: {}\n 100 trapezoids: {}\n'
    .format(abs(trap_2 - manual_calc), abs(trap_100 - manual_calc)))
print('Rectangular approximation:\n 2 rectangles: {}\n 100 rectangles: {}'
    .format(rect_2, rect_100))
print('Error of the rectangular approximation:\n 2 rectangles: {}\n 100 rectangles: {}'
    .format(abs(rect_2 - manual_calc), abs(rect_100 - manual_calc)))
|
flexible
|
{
"blob_id": "4fbf5b4520aa4dca4c7cc80d56ba00f634d184bf",
"index": 3405,
"step-1": "<mask token>\n\n\ndef rectangular(f, a, b, n):\n h = float(b - a) / n\n result = f(a + 0.5 * h)\n for i in range(1, n):\n result += f(a + 0.5 * h + i * h)\n result *= h\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef trapezoidal(f, a, b, n):\n h = float(b - a) / n\n result = 0.5 * (f(a) + f(b))\n for i in range(1, n):\n result += f(a + i * h)\n result *= h\n return result\n\n\ndef rectangular(f, a, b, n):\n h = float(b - a) / n\n result = f(a + 0.5 * h)\n for i in range(1, n):\n result += f(a + 0.5 * h + i * h)\n result *= h\n return result\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef trapezoidal(f, a, b, n):\n h = float(b - a) / n\n result = 0.5 * (f(a) + f(b))\n for i in range(1, n):\n result += f(a + i * h)\n result *= h\n return result\n\n\ndef rectangular(f, a, b, n):\n h = float(b - a) / n\n result = f(a + 0.5 * h)\n for i in range(1, n):\n result += f(a + 0.5 * h + i * h)\n result *= h\n return result\n\n\n<mask token>\nprint('Точное значение интеграла: {}\\n'.format(manual_calc))\nprint(\"\"\"Аппроксимация трапециями:\n 2 трапеции: {}\n 100 трапеций: {}\"\"\".\n format(trap_2, trap_100))\nprint(\n \"\"\"Погрешность для аппроксимации трапециями:\n 2 трапеции: {}\n 100 трапеций: {}\n\"\"\"\n .format(abs(trap_2 - manual_calc), abs(trap_100 - manual_calc)))\nprint(\n \"\"\"Аппроксимация прямоугольниками:\n 2 прямоугольника: {}\n 100 прямоугольников: {}\"\"\"\n .format(rect_2, rect_100))\nprint(\n \"\"\"Погрешность для аппроксимации прямоугольниками:\n 2 прямоугольника: {}\n 100 прямоугольников: {}\"\"\"\n .format(abs(rect_2 - manual_calc), abs(rect_100 - manual_calc)))\n",
"step-4": "manual_calc = 53 + 1.0 / 3\n\n\ndef trapezoidal(f, a, b, n):\n h = float(b - a) / n\n result = 0.5 * (f(a) + f(b))\n for i in range(1, n):\n result += f(a + i * h)\n result *= h\n return result\n\n\ndef rectangular(f, a, b, n):\n h = float(b - a) / n\n result = f(a + 0.5 * h)\n for i in range(1, n):\n result += f(a + 0.5 * h + i * h)\n result *= h\n return result\n\n\ntrap_2 = trapezoidal(lambda x: x * (x - 1), 2, 6, 2)\ntrap_100 = trapezoidal(lambda x: x * (x - 1), 2, 6, 100)\nrect_2 = rectangular(lambda x: x * (x - 1), 2, 6, 2)\nrect_100 = rectangular(lambda x: x * (x - 1), 2, 6, 100)\nprint('Точное значение интеграла: {}\\n'.format(manual_calc))\nprint(\"\"\"Аппроксимация трапециями:\n 2 трапеции: {}\n 100 трапеций: {}\"\"\".\n format(trap_2, trap_100))\nprint(\n \"\"\"Погрешность для аппроксимации трапециями:\n 2 трапеции: {}\n 100 трапеций: {}\n\"\"\"\n .format(abs(trap_2 - manual_calc), abs(trap_100 - manual_calc)))\nprint(\n \"\"\"Аппроксимация прямоугольниками:\n 2 прямоугольника: {}\n 100 прямоугольников: {}\"\"\"\n .format(rect_2, rect_100))\nprint(\n \"\"\"Погрешность для аппроксимации прямоугольниками:\n 2 прямоугольника: {}\n 100 прямоугольников: {}\"\"\"\n .format(abs(rect_2 - manual_calc), abs(rect_100 - manual_calc)))\n",
"step-5": "# -*- coding: utf-8 -*-\n# упражнение выполнено на Python 3\n\n\nmanual_calc = 53 + 1.0/3\n\n\ndef trapezoidal(f, a, b, n):\n\t\n\th = float(b - a)/n\n\tresult = 0.5*(f(a) + f(b))\n\tfor i in range(1, n):\n\t\tresult += f(a + i*h)\n\tresult *= h\n\treturn result\n\n\ndef rectangular(f, a, b, n):\n\t\n\th = float(b - a)/n\n\tresult = f(a+0.5*h)\n\tfor i in range(1, n):\n\t\tresult += f(a + 0.5*h + i*h)\n\tresult *= h\n\treturn result\n\n\ntrap_2 = trapezoidal(lambda x: x * (x - 1), 2, 6, 2)\ntrap_100 = trapezoidal(lambda x: x * (x - 1), 2, 6, 100)\nrect_2 = rectangular(lambda x: x * (x - 1), 2, 6, 2)\nrect_100 = rectangular(lambda x: x * (x - 1), 2, 6, 100)\n\nprint('Точное значение интеграла: {}\\n'.format(manual_calc))\n\nprint('Аппроксимация трапециями:\\n 2 трапеции: {}\\n 100 трапеций: {}'\n .format(trap_2, trap_100))\n\nprint('Погрешность для аппроксимации трапециями:\\n 2 трапеции: {}\\n 100 трапеций: {}\\n'\n .format(abs(trap_2 - manual_calc), abs(trap_100 - manual_calc)))\n\nprint('Аппроксимация прямоугольниками:\\n 2 прямоугольника: {}\\n 100 прямоугольников: {}'\n .format(rect_2, rect_100))\n\nprint('Погрешность для аппроксимации прямоугольниками:\\n 2 прямоугольника: {}\\n 100 прямоугольников: {}'\n .format(abs(rect_2 - manual_calc), abs(rect_100 - manual_calc)))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('app01', '0004_auto_20200213_1202')]
operations = [migrations.DeleteModel(name='Subject'), migrations.
RenameField(model_name='user', old_name='name', new_name=
'user_name'), migrations.AlterField(model_name='user', name='id',
field=models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID'))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('app01', '0004_auto_20200213_1202')]
operations = [migrations.DeleteModel(name='Subject'), migrations.
RenameField(model_name='user', old_name='name', new_name=
'user_name'), migrations.AlterField(model_name='user', name='id',
field=models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID'))]
<|reserved_special_token_1|>
# Generated by Django 2.2.1 on 2020-02-13 05:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app01', '0004_auto_20200213_1202'),
]
operations = [
migrations.DeleteModel(
name='Subject',
),
migrations.RenameField(
model_name='user',
old_name='name',
new_name='user_name',
),
migrations.AlterField(
model_name='user',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
|
flexible
|
{
"blob_id": "9b7601a5230bfd2370e73a71d141d6de68ade50f",
"index": 8972,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('app01', '0004_auto_20200213_1202')]\n operations = [migrations.DeleteModel(name='Subject'), migrations.\n RenameField(model_name='user', old_name='name', new_name=\n 'user_name'), migrations.AlterField(model_name='user', name='id',\n field=models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('app01', '0004_auto_20200213_1202')]\n operations = [migrations.DeleteModel(name='Subject'), migrations.\n RenameField(model_name='user', old_name='name', new_name=\n 'user_name'), migrations.AlterField(model_name='user', name='id',\n field=models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID'))]\n",
"step-5": "# Generated by Django 2.2.1 on 2020-02-13 05:18\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app01', '0004_auto_20200213_1202'),\n ]\n\n operations = [\n migrations.DeleteModel(\n name='Subject',\n ),\n migrations.RenameField(\n model_name='user',\n old_name='name',\n new_name='user_name',\n ),\n migrations.AlterField(\n model_name='user',\n name='id',\n field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Node(object):
<|reserved_special_token_0|>
class tree(object):
def __init__(self):
self.root = None
def insert(self, root, value):
if self.root == None:
self.root = Node(value)
elif value < root.data:
if root.left is None:
root.left = Node(value)
else:
self.insert(root.left, value)
elif value > root.data:
if root.right is None:
root.right = Node(value)
else:
self.insert(root.right, value)
return root
def delete(self, root, data, parent):
if root is None:
return root
if root.data < data:
parent = root
root.right = self.delete(root.right, data, parent)
elif root.data > data:
parent = root
root.left = self.delete(root.left, data, parent)
elif root is None or root.data != data:
return False
elif root.left is None and root.right is None:
if data > parent.data:
parent.right = None
root = None
else:
parent.left = None
root = None
elif root.left is None:
if data > parent.data:
parent.right = root.right
root = parent.right
else:
parent.left = root.right
root = parent.left
elif root.right is None:
if data > parent.data:
parent.right = root.right
root = parent.right
else:
parent.left = root.right
root = parent.right
else:
temp = self.successor(root.right)
root.data = temp.data
root.right = self.delete(root.right, temp.data, parent)
return root
def successor(self, root):
temp = root
if root.right:
while temp.left:
temp = temp.left
return temp
def inorder(self, root):
if root is not None:
self.inorder(root.left)
print(root.data)
self.inorder(root.right)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Node(object):
def __init__(self, data):
self.data = data
self.left = None
self.right = None
self.parent = None
class tree(object):
def __init__(self):
self.root = None
def insert(self, root, value):
if self.root == None:
self.root = Node(value)
elif value < root.data:
if root.left is None:
root.left = Node(value)
else:
self.insert(root.left, value)
elif value > root.data:
if root.right is None:
root.right = Node(value)
else:
self.insert(root.right, value)
return root
def delete(self, root, data, parent):
if root is None:
return root
if root.data < data:
parent = root
root.right = self.delete(root.right, data, parent)
elif root.data > data:
parent = root
root.left = self.delete(root.left, data, parent)
elif root is None or root.data != data:
return False
elif root.left is None and root.right is None:
if data > parent.data:
parent.right = None
root = None
else:
parent.left = None
root = None
elif root.left is None:
if data > parent.data:
parent.right = root.right
root = parent.right
else:
parent.left = root.right
root = parent.left
elif root.right is None:
if data > parent.data:
parent.right = root.right
root = parent.right
else:
parent.left = root.right
root = parent.right
else:
temp = self.successor(root.right)
root.data = temp.data
root.right = self.delete(root.right, temp.data, parent)
return root
def successor(self, root):
temp = root
if root.right:
while temp.left:
temp = temp.left
return temp
def inorder(self, root):
if root is not None:
self.inorder(root.left)
print(root.data)
self.inorder(root.right)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Node(object):
def __init__(self, data):
self.data = data
self.left = None
self.right = None
self.parent = None
class tree(object):
def __init__(self):
self.root = None
def insert(self, root, value):
if self.root == None:
self.root = Node(value)
elif value < root.data:
if root.left is None:
root.left = Node(value)
else:
self.insert(root.left, value)
elif value > root.data:
if root.right is None:
root.right = Node(value)
else:
self.insert(root.right, value)
return root
def delete(self, root, data, parent):
if root is None:
return root
if root.data < data:
parent = root
root.right = self.delete(root.right, data, parent)
elif root.data > data:
parent = root
root.left = self.delete(root.left, data, parent)
elif root is None or root.data != data:
return False
elif root.left is None and root.right is None:
if data > parent.data:
parent.right = None
root = None
else:
parent.left = None
root = None
elif root.left is None:
if data > parent.data:
parent.right = root.right
root = parent.right
else:
parent.left = root.right
root = parent.left
elif root.right is None:
if data > parent.data:
parent.right = root.right
root = parent.right
else:
parent.left = root.right
root = parent.right
else:
temp = self.successor(root.right)
root.data = temp.data
root.right = self.delete(root.right, temp.data, parent)
return root
def successor(self, root):
temp = root
if root.right:
while temp.left:
temp = temp.left
return temp
def inorder(self, root):
if root is not None:
self.inorder(root.left)
print(root.data)
self.inorder(root.right)
def main():
Tree = tree()
l = [50, 30, 20, 40, 70, 60, 80]
for item in l:
Tree.insert(Tree.root, item)
print(Tree.delete(Tree.root, 20, None))
print('inorder after deleting 20:')
print(Tree.inorder(Tree.root))
print(Tree.delete(Tree.root, 30, None))
print(Tree.delete(Tree.root, 50, None))
print(Tree.inorder(Tree.root))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Node(object):
def __init__(self, data):
self.data = data
self.left = None
self.right = None
self.parent = None
class tree(object):
def __init__(self):
self.root = None
def insert(self, root, value):
if self.root == None:
self.root = Node(value)
elif value < root.data:
if root.left is None:
root.left = Node(value)
else:
self.insert(root.left, value)
elif value > root.data:
if root.right is None:
root.right = Node(value)
else:
self.insert(root.right, value)
return root
def delete(self, root, data, parent):
if root is None:
return root
if root.data < data:
parent = root
root.right = self.delete(root.right, data, parent)
elif root.data > data:
parent = root
root.left = self.delete(root.left, data, parent)
elif root is None or root.data != data:
return False
elif root.left is None and root.right is None:
if data > parent.data:
parent.right = None
root = None
else:
parent.left = None
root = None
elif root.left is None:
if data > parent.data:
parent.right = root.right
root = parent.right
else:
parent.left = root.right
root = parent.left
elif root.right is None:
if data > parent.data:
parent.right = root.right
root = parent.right
else:
parent.left = root.right
root = parent.right
else:
temp = self.successor(root.right)
root.data = temp.data
root.right = self.delete(root.right, temp.data, parent)
return root
def successor(self, root):
temp = root
if root.right:
while temp.left:
temp = temp.left
return temp
def inorder(self, root):
if root is not None:
self.inorder(root.left)
print(root.data)
self.inorder(root.right)
def main():
Tree = tree()
l = [50, 30, 20, 40, 70, 60, 80]
for item in l:
Tree.insert(Tree.root, item)
print(Tree.delete(Tree.root, 20, None))
print('inorder after deleting 20:')
print(Tree.inorder(Tree.root))
print(Tree.delete(Tree.root, 30, None))
print(Tree.delete(Tree.root, 50, None))
print(Tree.inorder(Tree.root))
main()
<|reserved_special_token_1|>
class Node(object):
def __init__(self,data):
self.data = data
self.left = None
self.right = None
self.parent = None
class tree(object):
def __init__(self):
self.root = None
def insert(self,root,value):
if self.root == None:
self.root = Node(value)
else:
if value < root.data:
if root.left is None:
root.left = Node(value)
else:
self.insert(root.left,value)
elif value > root.data:
if root.right is None:
root.right = Node(value)
else:
self.insert(root.right,value)
return root
def delete(self,root,data,parent):
if root is None:
return root
if root.data < data:
parent = root
root.right = self.delete(root.right,data,parent)
elif root.data > data :
parent = root
root.left = self.delete(root.left,data,parent)
else:
if root is None or root.data != data:
return False
elif root.left is None and root.right is None:
if data > parent.data:
parent.right = None
root = None
else:
parent.left = None
root = None
elif root.left is None:
if data > parent.data:
parent.right = root.right
root = parent.right
else:
parent.left = root.right
root = parent.left
elif root.right is None:
if data > parent.data:
parent.right = root.right
root = parent.right
else:
parent.left = root.right
root = parent.right
else:
temp = self.successor(root.right)
root.data = temp.data
root.right = self.delete(root.right,temp.data,parent)
return root
def successor(self,root):
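        # leftmost (smallest) node of the given subtree, used as the in-order successor of the deleted node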
temp = root
if root.right:
while temp.left:
temp = temp.left
return temp
def inorder(self,root):
if root is not None:
self.inorder(root.left)
print(root.data)
self.inorder(root.right)
def main():
Tree = tree()
l =[50,30,20,40,70,60,80]
for item in l:
Tree.insert(Tree.root,item)
print(Tree.delete(Tree.root,20,None))
print("inorder after deleting 20:")
print(Tree.inorder(Tree.root))
print(Tree.delete(Tree.root,30,None))
print(Tree.delete(Tree.root,50,None))
print(Tree.inorder(Tree.root))
main()
|
flexible
|
{
"blob_id": "64c32b3ada7fff51a7c4b07872b7688e100897d8",
"index": 81,
"step-1": "class Node(object):\n <mask token>\n\n\nclass tree(object):\n\n def __init__(self):\n self.root = None\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n\n def inorder(self, root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n\n\n<mask token>\n",
"step-2": "class Node(object):\n\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.parent = None\n\n\nclass tree(object):\n\n def __init__(self):\n self.root = None\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n\n def inorder(self, root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n\n\n<mask token>\n",
"step-3": "class Node(object):\n\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.parent = None\n\n\nclass tree(object):\n\n def __init__(self):\n self.root = None\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n\n def inorder(self, root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n\n\ndef main():\n Tree = tree()\n l = [50, 30, 20, 40, 70, 60, 80]\n for item in l:\n Tree.insert(Tree.root, item)\n print(Tree.delete(Tree.root, 20, None))\n print('inorder after deleting 20:')\n print(Tree.inorder(Tree.root))\n print(Tree.delete(Tree.root, 30, None))\n print(Tree.delete(Tree.root, 50, None))\n print(Tree.inorder(Tree.root))\n\n\n<mask token>\n",
"step-4": "class Node(object):\n\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.parent = None\n\n\nclass tree(object):\n\n def __init__(self):\n self.root = None\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n\n def inorder(self, root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n\n\ndef main():\n Tree = tree()\n l = [50, 30, 20, 40, 70, 60, 80]\n for item in l:\n Tree.insert(Tree.root, item)\n print(Tree.delete(Tree.root, 20, None))\n print('inorder after deleting 20:')\n print(Tree.inorder(Tree.root))\n print(Tree.delete(Tree.root, 30, None))\n print(Tree.delete(Tree.root, 50, None))\n print(Tree.inorder(Tree.root))\n\n\nmain()\n",
"step-5": "class Node(object):\n def __init__(self,data):\n self.data = data\n self.left = None\n self.right = None\n self.parent = None\n\nclass tree(object):\n def __init__(self):\n self.root = None\n \n def insert(self,root,value):\n if self.root == None:\n self.root = Node(value)\n else:\n if value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left,value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right,value)\n return root \n def delete(self,root,data,parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right,data,parent)\n elif root.data > data :\n parent = root\n root.left = self.delete(root.left,data,parent)\n else:\n if root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n \n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right,temp.data,parent)\n \n return root\n \n def successor(self,root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n def inorder(self,root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n \ndef main():\n Tree = tree()\n l =[50,30,20,40,70,60,80]\n for item in l:\n Tree.insert(Tree.root,item)\n print(Tree.delete(Tree.root,20,None))\n print(\"inorder after deleting 20:\")\n print(Tree.inorder(Tree.root))\n print(Tree.delete(Tree.root,30,None))\n print(Tree.delete(Tree.root,50,None))\n print(Tree.inorder(Tree.root))\n \nmain()\n \n \n \n \n \n \n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
# MODULES
import sys
sys.path.append('~/Documents/Project_3/REPO')
from scipy import *
from scipy import linalg
import cPickle as pickle
import ConfigParser
import TobySpectralMethods as tsm
config = ConfigParser.RawConfigParser()
fp = open('config.cfg')
config.readfp(fp)
N = config.getint('General', 'N')
M = config.getint('General', 'M')
Re = config.getfloat('General', 'Re')
kx = config.getfloat('General', 'kx')
dt = config.getfloat('Time Iteration', 'dt')
totTime = config.getfloat('Time Iteration', 'totTime')
numFrames = config.getint('Time Iteration', 'numFrames')
fp.close()
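# amp sets the size of the random perturbation applied to the first Fourier mode of the initial guess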
amp = 0.025
tsm.initTSM(N_=N, M_=M, kx_=kx)
def mk_PSI_ECS_guess():
PSI = zeros(vecLen, dtype='complex')
PSI[N*M] += 2.0/3.0
PSI[N*M+1] += 3.0/4.0
PSI[N*M+2] += 0.0
PSI[N*M+3] += -1.0/12.0
# Perturb 3 of 4 of first Chebyshevs of the 1st Fourier mode
PSI[(N-1)*M] = -random.normal(loc=amp, scale=0.001)
PSI[(N-1)*M+2] = random.normal(loc=amp, scale=0.001)
PSI[(N-1)*M+4] = -0.1*random.normal(loc=amp, scale=0.001)
PSI[(N-1)*M+6] = -0.05*random.normal(loc=amp, scale=0.001)
PSI[(N+1)*M:(N+2)*M] = conjugate(PSI[(N-1)*M:N*M])
# reduce the base flow KE by a roughly corresponding amount (8pc), with this
# energy in the perturbation (hopefully). ( 0.96 is about root(0.92) )
bfReduc = 0.8
PSI[N*M:(N+1)*M] = bfReduc*PSI[N*M:(N+1)*M]
# Check to make sure energy is large enough to get an ECS
U = dot(MDY, PSI)
V = - dot(MDX, PSI)
MMU = tsm.prod_mat(U)
MMV = tsm.prod_mat(V)
Usq = dot(MMU, U) + dot(MMV, V)
Usq1 = Usq[(N-1)*M:N*M] + Usq[(N+1)*M:(N+2)*M]
Usq2 = Usq[(N-2)*M:(N-1)*M] + Usq[(N+2)*M:(N+3)*M]
KE0 = 0.5*dot(INTY, Usq[N*M:(N+1)*M])
KE1 = 0.5*dot(INTY, Usq1)
KE2 = 0.5*dot(INTY, Usq2)
print 'Kinetic energy of 0th mode is: ', KE0
print 'Kinetic energy of 1st mode is: ', KE1
print 'TOTAL: ', KE0+KE1+KE2
print 'norm of 0th mode is: ', linalg.norm(PSI[N*M:(N+1)*M], 2)
print 'norm of 1st mode is: ', linalg.norm(PSI[(N-1)*M:N*M] +
PSI[(N+1)*M:(N+2)*M], 2)
return PSI
# MAIN
vecLen = (2*N+1)*M
# Useful operators
MDY = tsm.mk_diff_y()
MDYY = dot(MDY,MDY)
MDYYY = dot(MDY,MDYY)
MDX = tsm.mk_diff_x()
MDXX = dot(MDX, MDX)
MDXY = dot(MDX, MDY)
LAPLAC = dot(MDX,MDX) + dot(MDY,MDY)
BIHARM = dot(LAPLAC, LAPLAC)
MDXLAPLAC = dot(MDX, LAPLAC)
MDYLAPLAC = dot(MDY, LAPLAC)
# single mode Operators
SMDY = tsm.mk_single_diffy()
SMDYY = dot(SMDY, SMDY)
SMDYYY = dot(SMDY, SMDYY)
INTY = tsm.mk_cheb_int()
# Identity
SII = eye(M, M, dtype='complex')
PSI = mk_PSI_ECS_guess()
pickle.dump(PSI, open('psi.init', 'w'))
|
normal
|
{
"blob_id": "1221394dfb97cbbfb00b412f60d4df521acc1262",
"index": 8029,
"step-1": "\n# MODULES\nimport sys\nsys.path.append('~/Documents/Project_3/REPO')\nfrom scipy import *\nfrom scipy import linalg\nimport cPickle as pickle\nimport ConfigParser\nimport TobySpectralMethods as tsm\n\nconfig = ConfigParser.RawConfigParser()\nfp = open('config.cfg')\nconfig.readfp(fp)\nN = config.getint('General', 'N')\nM = config.getint('General', 'M')\nRe = config.getfloat('General', 'Re')\nkx = config.getfloat('General', 'kx')\ndt = config.getfloat('Time Iteration', 'dt')\ntotTime = config.getfloat('Time Iteration', 'totTime')\nnumFrames = config.getint('Time Iteration', 'numFrames')\nfp.close()\n\namp = 0.025\n\ntsm.initTSM(N_=N, M_=M, kx_=kx)\n\ndef mk_PSI_ECS_guess():\n\n PSI = zeros(vecLen, dtype='complex')\n\n PSI[N*M] += 2.0/3.0\n PSI[N*M+1] += 3.0/4.0\n PSI[N*M+2] += 0.0\n PSI[N*M+3] += -1.0/12.0\n\n # Perturb 3 of 4 of first Chebyshevs of the 1st Fourier mode\n PSI[(N-1)*M] = -random.normal(loc=amp, scale=0.001) \n PSI[(N-1)*M+2] = random.normal(loc=amp, scale=0.001) \n PSI[(N-1)*M+4] = -0.1*random.normal(loc=amp, scale=0.001) \n PSI[(N-1)*M+6] = -0.05*random.normal(loc=amp, scale=0.001) \n\n PSI[(N+1)*M:(N+2)*M] = conjugate(PSI[(N-1)*M:N*M])\n\n # reduce the base flow KE by a roughly corresponding amount (8pc), with this\n # energy in the perturbation (hopefully). ( 0.96 is about root(0.92) )\n bfReduc = 0.8\n PSI[N*M:(N+1)*M] = bfReduc*PSI[N*M:(N+1)*M]\n\n # Check to make sure energy is large enough to get an ECS\n U = dot(MDY, PSI)\n V = - dot(MDX, PSI)\n MMU = tsm.prod_mat(U)\n MMV = tsm.prod_mat(V)\n Usq = dot(MMU, U) + dot(MMV, V)\n Usq1 = Usq[(N-1)*M:N*M] + Usq[(N+1)*M:(N+2)*M]\n Usq2 = Usq[(N-2)*M:(N-1)*M] + Usq[(N+2)*M:(N+3)*M]\n KE0 = 0.5*dot(INTY, Usq[N*M:(N+1)*M])\n KE1 = 0.5*dot(INTY, Usq1)\n KE2 = 0.5*dot(INTY, Usq2)\n print 'Kinetic energy of 0th mode is: ', KE0\n print 'Kinetic energy of 1st mode is: ', KE1\n print 'TOTAL: ', KE0+KE1+KE2\n\n print 'norm of 0th mode is: ', linalg.norm(PSI[N*M:(N+1)*M], 2)\n print 'norm of 1st mode is: ', linalg.norm(PSI[(N-1)*M:N*M] +\n PSI[(N+1)*M:(N+2)*M], 2)\n\n return PSI\n\n\n# MAIN\nvecLen = (2*N+1)*M\n# Useful operators \n\nMDY = tsm.mk_diff_y()\nMDYY = dot(MDY,MDY)\nMDYYY = dot(MDY,MDYY)\nMDX = tsm.mk_diff_x()\nMDXX = dot(MDX, MDX)\nMDXY = dot(MDX, MDY)\nLAPLAC = dot(MDX,MDX) + dot(MDY,MDY)\nBIHARM = dot(LAPLAC, LAPLAC)\nMDXLAPLAC = dot(MDX, LAPLAC)\nMDYLAPLAC = dot(MDY, LAPLAC)\n\n# single mode Operators\nSMDY = tsm.mk_single_diffy()\nSMDYY = dot(SMDY, SMDY)\nSMDYYY = dot(SMDY, SMDYY)\n\nINTY = tsm.mk_cheb_int()\n\n# Identity\nSII = eye(M, M, dtype='complex')\n\n\nPSI = mk_PSI_ECS_guess()\n\npickle.dump(PSI, open('psi.init', 'w'))\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
class CustomPrinter(object):
<|reserved_special_token_0|>
def to_string(self):
res = '{'
for m in xrange(64):
res += hex(int(self.val[m]))
if m != 63:
res += ', '
res += ' }'
return res
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class CustomPrinter(object):
def __init__(self, val):
self.val = val
def to_string(self):
res = '{'
for m in xrange(64):
res += hex(int(self.val[m]))
if m != 63:
res += ', '
res += ' }'
return res
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class CustomPrinter(object):
def __init__(self, val):
self.val = val
def to_string(self):
res = '{'
for m in xrange(64):
res += hex(int(self.val[m]))
if m != 63:
res += ', '
res += ' }'
return res
def lookup_type(val):
if str(val.type) == 'unsigned char [64]':
return CustomPrinter(val)
return None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class CustomPrinter(object):
def __init__(self, val):
self.val = val
def to_string(self):
res = '{'
for m in xrange(64):
res += hex(int(self.val[m]))
if m != 63:
res += ', '
res += ' }'
return res
def lookup_type(val):
if str(val.type) == 'unsigned char [64]':
return CustomPrinter(val)
return None
gdb.pretty_printers.append(lookup_type)
<|reserved_special_token_1|>
class CustomPrinter(object):
def __init__(self, val):
self.val = val
def to_string(self):
res = "{"
for m in xrange(64):
res += hex(int(self.val[m]))
if m != 63:
res += ", "
res += " }"
return res
def lookup_type(val):
if str(val.type) == 'unsigned char [64]':
return CustomPrinter(val)
return None
gdb.pretty_printers.append(lookup_type)
|
flexible
|
{
"blob_id": "4d5b2ed016cfc6740c3ee5397c894fabc1bec73f",
"index": 6963,
"step-1": "class CustomPrinter(object):\n <mask token>\n\n def to_string(self):\n res = '{'\n for m in xrange(64):\n res += hex(int(self.val[m]))\n if m != 63:\n res += ', '\n res += ' }'\n return res\n\n\n<mask token>\n",
"step-2": "class CustomPrinter(object):\n\n def __init__(self, val):\n self.val = val\n\n def to_string(self):\n res = '{'\n for m in xrange(64):\n res += hex(int(self.val[m]))\n if m != 63:\n res += ', '\n res += ' }'\n return res\n\n\n<mask token>\n",
"step-3": "class CustomPrinter(object):\n\n def __init__(self, val):\n self.val = val\n\n def to_string(self):\n res = '{'\n for m in xrange(64):\n res += hex(int(self.val[m]))\n if m != 63:\n res += ', '\n res += ' }'\n return res\n\n\ndef lookup_type(val):\n if str(val.type) == 'unsigned char [64]':\n return CustomPrinter(val)\n return None\n\n\n<mask token>\n",
"step-4": "class CustomPrinter(object):\n\n def __init__(self, val):\n self.val = val\n\n def to_string(self):\n res = '{'\n for m in xrange(64):\n res += hex(int(self.val[m]))\n if m != 63:\n res += ', '\n res += ' }'\n return res\n\n\ndef lookup_type(val):\n if str(val.type) == 'unsigned char [64]':\n return CustomPrinter(val)\n return None\n\n\ngdb.pretty_printers.append(lookup_type)\n",
"step-5": "class CustomPrinter(object):\n def __init__(self, val):\n self.val = val\n\n def to_string(self):\n res = \"{\"\n for m in xrange(64):\n res += hex(int(self.val[m]))\n if m != 63:\n res += \", \"\n res += \" }\"\n return res\n\n\ndef lookup_type(val):\n if str(val.type) == 'unsigned char [64]':\n return CustomPrinter(val)\n return None\n\n\ngdb.pretty_printers.append(lookup_type)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('데이터 셋 크기:', iris['data'].shape)
<|reserved_special_token_0|>
print(type(data1))
<|reserved_special_token_0|>
print(df)
<|reserved_special_token_0|>
print('데이터셋 내용:\n', iris['data'][:7, :])
<|reserved_special_token_0|>
print('데이터 프레임의 형태:', df.shape)
<|reserved_special_token_0|>
print(df.head(2))
<|reserved_special_token_0|>
print(df.head())
<|reserved_special_token_0|>
print(df)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
iris = datasets.load_iris()
<|reserved_special_token_0|>
print('데이터 셋 크기:', iris['data'].shape)
data1 = ['a', 'b', 'c', 'd', 'e']
print(type(data1))
sr1 = pd.Series(data1)
data2 = 1, 2, 3.14, 100, -10
sr2 = pd.Series(data2)
dict_data = {'c1': data1, 'c2': data2}
df = pd.DataFrame(dict_data)
print(df)
df.columns = ['string1', 'string2']
df.index = ['r1', 'r2', 'r3', 'r4', 'r5']
print('데이터셋 내용:\n', iris['data'][:7, :])
df = pd.DataFrame(iris['data'], columns=iris['feature_names'])
print('데이터 프레임의 형태:', df.shape)
df.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
print(df.head(2))
df['Target'] = iris['target']
print(df.head())
x = [2, 1, 13, 4, 15, 26]
y = [0, 4, 31, 2, 42, 54]
df = pd.DataFrame({'X': x, 'Y': y})
print(df)
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
from sklearn import datasets
iris = datasets.load_iris()
<|reserved_special_token_0|>
print('데이터 셋 크기:', iris['data'].shape)
data1 = ['a', 'b', 'c', 'd', 'e']
print(type(data1))
sr1 = pd.Series(data1)
data2 = 1, 2, 3.14, 100, -10
sr2 = pd.Series(data2)
dict_data = {'c1': data1, 'c2': data2}
df = pd.DataFrame(dict_data)
print(df)
df.columns = ['string1', 'string2']
df.index = ['r1', 'r2', 'r3', 'r4', 'r5']
print('데이터셋 내용:\n', iris['data'][:7, :])
df = pd.DataFrame(iris['data'], columns=iris['feature_names'])
print('데이터 프레임의 형태:', df.shape)
df.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
print(df.head(2))
df['Target'] = iris['target']
print(df.head())
x = [2, 1, 13, 4, 15, 26]
y = [0, 4, 31, 2, 42, 54]
df = pd.DataFrame({'X': x, 'Y': y})
print(df)
<|reserved_special_token_1|>
# Library environment
import pandas as pd
import numpy as np
# Load the iris dataset from the sklearn datasets module
from sklearn import datasets
iris = datasets.load_iris()
# The iris dataset is a dictionary, so check its keys
'''
print(iris.keys())
print(iris['DESCR'])
print("데이터 셋 크기:", iris['target'])
print("데이터 셋 내용:\n", iris['target'])
'''
# Size of the dataset in the data attribute
print("데이터 셋 크기:", iris['data'].shape)
# Contents of the data attribute (extract the first 7 rows)
data1 = ['a', 'b', 'c', 'd', 'e']
print(type(data1))
sr1 = pd.Series(data1)
# print(type(sr1))
data2 = (1, 2, 3.14, 100, -10)
sr2 = pd.Series(data2)
dict_data = {'c1':data1, 'c2':data2}
df = pd.DataFrame(dict_data)
print(df)
# Rename the columns and the index
df.columns = ['string1', 'string2']
df.index = ['r1', 'r2', 'r3', 'r4', 'r5']
# print(df.loc['r2':'r4', 'string1':'string2'])
print('데이터셋 내용:\n', iris['data'][:7, :])
df = pd.DataFrame(iris['data'], columns=iris['feature_names'])
print('데이터 프레임의 형태:', df.shape)
df.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
print(df.head(2))
df['Target'] = iris['target']
print(df.head())
x = [2, 1, 13, 4, 15, 26]
y = [0, 4, 31, 2, 42, 54]
df = pd.DataFrame({'X':x, 'Y':y})
print(df)
|
flexible
|
{
"blob_id": "dc2c9293040204f0ec2156c41b8be624f4e5cf99",
"index": 8389,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('데이터 셋 크기:', iris['data'].shape)\n<mask token>\nprint(type(data1))\n<mask token>\nprint(df)\n<mask token>\nprint('데이터셋 내용:\\n', iris['data'][:7, :])\n<mask token>\nprint('데이터 프레임의 형태:', df.shape)\n<mask token>\nprint(df.head(2))\n<mask token>\nprint(df.head())\n<mask token>\nprint(df)\n",
"step-3": "<mask token>\niris = datasets.load_iris()\n<mask token>\nprint('데이터 셋 크기:', iris['data'].shape)\ndata1 = ['a', 'b', 'c', 'd', 'e']\nprint(type(data1))\nsr1 = pd.Series(data1)\ndata2 = 1, 2, 3.14, 100, -10\nsr2 = pd.Series(data2)\ndict_data = {'c1': data1, 'c2': data2}\ndf = pd.DataFrame(dict_data)\nprint(df)\ndf.columns = ['string1', 'string2']\ndf.index = ['r1', 'r2', 'r3', 'r4', 'r5']\nprint('데이터셋 내용:\\n', iris['data'][:7, :])\ndf = pd.DataFrame(iris['data'], columns=iris['feature_names'])\nprint('데이터 프레임의 형태:', df.shape)\ndf.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']\nprint(df.head(2))\ndf['Target'] = iris['target']\nprint(df.head())\nx = [2, 1, 13, 4, 15, 26]\ny = [0, 4, 31, 2, 42, 54]\ndf = pd.DataFrame({'X': x, 'Y': y})\nprint(df)\n",
"step-4": "import pandas as pd\nimport numpy as np\nfrom sklearn import datasets\niris = datasets.load_iris()\n<mask token>\nprint('데이터 셋 크기:', iris['data'].shape)\ndata1 = ['a', 'b', 'c', 'd', 'e']\nprint(type(data1))\nsr1 = pd.Series(data1)\ndata2 = 1, 2, 3.14, 100, -10\nsr2 = pd.Series(data2)\ndict_data = {'c1': data1, 'c2': data2}\ndf = pd.DataFrame(dict_data)\nprint(df)\ndf.columns = ['string1', 'string2']\ndf.index = ['r1', 'r2', 'r3', 'r4', 'r5']\nprint('데이터셋 내용:\\n', iris['data'][:7, :])\ndf = pd.DataFrame(iris['data'], columns=iris['feature_names'])\nprint('데이터 프레임의 형태:', df.shape)\ndf.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']\nprint(df.head(2))\ndf['Target'] = iris['target']\nprint(df.head())\nx = [2, 1, 13, 4, 15, 26]\ny = [0, 4, 31, 2, 42, 54]\ndf = pd.DataFrame({'X': x, 'Y': y})\nprint(df)\n",
"step-5": "# 라이브러리 환경\nimport pandas as pd\nimport numpy as np\n\n# sklearn 테이터셋에서 iris 데이터셋 로딩\nfrom sklearn import datasets\niris = datasets.load_iris()\n\n# iris 데이터셋은 딕셔너리 형태이므로, key 값 확인\n'''\nprint(iris.keys())\nprint(iris['DESCR'])\nprint(\"데이터 셋 크기:\", iris['target'])\nprint(\"데이터 셋 내용:\\n\", iris['target'])\n'''\n\n# data 속성의 데이터셋 크기\nprint(\"데이터 셋 크기:\", iris['data'].shape)\n\n# data 속성의 데이터셋 내용(첫 7개 행 추출)\ndata1 = ['a', 'b', 'c', 'd', 'e']\nprint(type(data1))\nsr1 = pd.Series(data1)\n# print(type(sr1))\ndata2 = (1, 2, 3.14, 100, -10)\nsr2 = pd.Series(data2)\n\ndict_data = {'c1':data1, 'c2':data2}\ndf = pd.DataFrame(dict_data)\nprint(df)\n\n\n# 열(columns)과 행(index)이름 바꾸기\ndf.columns = ['string1', 'string2']\ndf.index = ['r1', 'r2', 'r3', 'r4', 'r5']\n\n# print(df.loc['r2':'r4', 'string1':'string2'])\n\nprint('데이터셋 내용:\\n', iris['data'][:7, :])\ndf = pd.DataFrame(iris['data'], columns=iris['feature_names'])\n\nprint('데이터 프레임의 형태:', df.shape)\ndf.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']\nprint(df.head(2))\n\ndf['Target'] = iris['target']\nprint(df.head())\n\nx = [2, 1, 13, 4, 15, 26]\ny = [0, 4, 31, 2, 42, 54]\n\ndf = pd.DataFrame({'X':x, 'Y':y})\nprint(df)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@click.command()
@click.option('-s', '--batch-size', 'batch_size', default=50)
def analyze(batch_size):
db = db_connect()
db_ensure_init(db)
cmd = db.execute('SELECT id, url FROM reports WHERE is_analyzed = 0')
for batch in iter(lambda : cmd.fetchmany(batch_size), []):
to_update = list()
for r in batch:
print('Analyzing: ' + r[1])
response = requests.get(r[1])
text = parse_text(response.text)
print(text[0:400] + '\n[CLIPPED]')
result = tone_count_with_negation_check(lmdict, text)
has_positive_sentiment = result[1] > result[2]
to_update.append((True, has_positive_sentiment, result[0],
result[1], result[2], ' '.join(result[3]), ' '.join(result[
4]), r[0]))
db_update(db, to_update)
<|reserved_special_token_0|>
def fix_url(href, base_url):
path = href.replace('ix?doc=/', '')
url = urljoin(base_url, path)
return url
<|reserved_special_token_0|>
@click.group()
def cli():
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@click.command()
@click.option('-s', '--batch-size', 'batch_size', default=50)
def analyze(batch_size):
db = db_connect()
db_ensure_init(db)
cmd = db.execute('SELECT id, url FROM reports WHERE is_analyzed = 0')
for batch in iter(lambda : cmd.fetchmany(batch_size), []):
to_update = list()
for r in batch:
print('Analyzing: ' + r[1])
response = requests.get(r[1])
text = parse_text(response.text)
print(text[0:400] + '\n[CLIPPED]')
result = tone_count_with_negation_check(lmdict, text)
has_positive_sentiment = result[1] > result[2]
to_update.append((True, has_positive_sentiment, result[0],
result[1], result[2], ' '.join(result[3]), ' '.join(result[
4]), r[0]))
db_update(db, to_update)
@click.command()
@click.argument('start', nargs=1)
@click.argument('end', nargs=1)
@click.option('-s', '--batch-size', 'batch_size', default=50)
def fetch_report_urls(start, end, batch_size):
"""Fetches and stores the 10-K report URLs"""
db = db_connect()
db_ensure_init(db)
with open('log.csv', 'w', newline='') as log:
logwriter = csv.writer(log)
cmd = db.execute(
"""
SELECT ix.id, ix.conm, ix.type, ix.cik, ix.date, ix.path
FROM "index" ix
LEFT JOIN reports r ON ix.id = r.index_id
WHERE ix.type = '10-K' AND r.id IS NULL AND
CAST(strftime('%Y', DATE(ix.date)) as INT) >= {start} AND
CAST(strftime('%Y', DATE(ix.date)) as INT) <= {end}
ORDER BY ix.date DESC
"""
.format(start=start, end=end))
for batch in iter(lambda : cmd.fetchmany(batch_size), []):
to_insert = list()
for r in batch:
log_row = r
response = requests.get(r[5])
href = parse_href(response.content)
url = fix_url(href, r[5])
print(url)
filetype = mimetypes.guess_type(url)[0]
print(filetype)
filename = os.path.basename(urlparse(url).path)
print(filename)
to_insert.append((r[0], r[1], r[2], r[3], r[4], url,
filetype, filename))
logwriter.writerow(log_row)
db_insert(db, to_insert)
<|reserved_special_token_0|>
def fix_url(href, base_url):
path = href.replace('ix?doc=/', '')
url = urljoin(base_url, path)
return url
<|reserved_special_token_0|>
def db_connect():
db = sqlite3.connect('edgar_htm_idx.sqlite3')
return db
<|reserved_special_token_0|>
def db_update(db: Connection, records):
c = db.cursor()
c.executemany(
"""
UPDATE reports SET
is_analyzed = ?,
has_positive_sentiment = ?,
word_count = ?,
pos_count = ?,
neg_count = ?,
pos_words = ?,
neg_words = ?
where id = ?"""
, records)
db.commit()
<|reserved_special_token_0|>
@click.group()
def cli():
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@click.command()
@click.option('-s', '--batch-size', 'batch_size', default=50)
def analyze(batch_size):
db = db_connect()
db_ensure_init(db)
cmd = db.execute('SELECT id, url FROM reports WHERE is_analyzed = 0')
for batch in iter(lambda : cmd.fetchmany(batch_size), []):
to_update = list()
for r in batch:
print('Analyzing: ' + r[1])
response = requests.get(r[1])
text = parse_text(response.text)
print(text[0:400] + '\n[CLIPPED]')
result = tone_count_with_negation_check(lmdict, text)
has_positive_sentiment = result[1] > result[2]
to_update.append((True, has_positive_sentiment, result[0],
result[1], result[2], ' '.join(result[3]), ' '.join(result[
4]), r[0]))
db_update(db, to_update)
@click.command()
@click.argument('start', nargs=1)
@click.argument('end', nargs=1)
@click.option('-s', '--batch-size', 'batch_size', default=50)
def fetch_report_urls(start, end, batch_size):
"""Fetches and stores the 10-K report URLs"""
db = db_connect()
db_ensure_init(db)
with open('log.csv', 'w', newline='') as log:
logwriter = csv.writer(log)
cmd = db.execute(
"""
SELECT ix.id, ix.conm, ix.type, ix.cik, ix.date, ix.path
FROM "index" ix
LEFT JOIN reports r ON ix.id = r.index_id
WHERE ix.type = '10-K' AND r.id IS NULL AND
CAST(strftime('%Y', DATE(ix.date)) as INT) >= {start} AND
CAST(strftime('%Y', DATE(ix.date)) as INT) <= {end}
ORDER BY ix.date DESC
"""
.format(start=start, end=end))
for batch in iter(lambda : cmd.fetchmany(batch_size), []):
to_insert = list()
for r in batch:
log_row = r
response = requests.get(r[5])
href = parse_href(response.content)
url = fix_url(href, r[5])
print(url)
filetype = mimetypes.guess_type(url)[0]
print(filetype)
filename = os.path.basename(urlparse(url).path)
print(filename)
to_insert.append((r[0], r[1], r[2], r[3], r[4], url,
filetype, filename))
logwriter.writerow(log_row)
db_insert(db, to_insert)
def parse_href(html_content):
root = to_doc(html_content)
elements = root.xpath('(//div[@id="formDiv"]//table//tr[2]/td[3]/a)')
if len(elements) == 0:
raise Exception('Unable to parse URL from index page')
href = elements[0].get('href')
return href
def fix_url(href, base_url):
path = href.replace('ix?doc=/', '')
url = urljoin(base_url, path)
return url
def to_doc(content):
try:
doc = etree.fromstring(content)
except:
doc = fromstring(content)
return doc
def db_connect():
db = sqlite3.connect('edgar_htm_idx.sqlite3')
return db
def db_insert(db: Connection, records):
c = db.cursor()
c.executemany(
'INSERT INTO reports(index_id, conm, type, cik, date, url, filetype, filename) VALUES (?, ?, ?, ?, ?, ?, ?, ?)'
, records)
db.commit()
def db_update(db: Connection, records):
c = db.cursor()
c.executemany(
"""
UPDATE reports SET
is_analyzed = ?,
has_positive_sentiment = ?,
word_count = ?,
pos_count = ?,
neg_count = ?,
pos_words = ?,
neg_words = ?
where id = ?"""
, records)
db.commit()
<|reserved_special_token_0|>
@click.group()
def cli():
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@click.command()
@click.option('-s', '--batch-size', 'batch_size', default=50)
def analyze(batch_size):
db = db_connect()
db_ensure_init(db)
cmd = db.execute('SELECT id, url FROM reports WHERE is_analyzed = 0')
for batch in iter(lambda : cmd.fetchmany(batch_size), []):
to_update = list()
for r in batch:
print('Analyzing: ' + r[1])
response = requests.get(r[1])
text = parse_text(response.text)
print(text[0:400] + '\n[CLIPPED]')
result = tone_count_with_negation_check(lmdict, text)
has_positive_sentiment = result[1] > result[2]
to_update.append((True, has_positive_sentiment, result[0],
result[1], result[2], ' '.join(result[3]), ' '.join(result[
4]), r[0]))
db_update(db, to_update)
@click.command()
@click.argument('start', nargs=1)
@click.argument('end', nargs=1)
@click.option('-s', '--batch-size', 'batch_size', default=50)
def fetch_report_urls(start, end, batch_size):
"""Fetches and stores the 10-K report URLs"""
db = db_connect()
db_ensure_init(db)
with open('log.csv', 'w', newline='') as log:
logwriter = csv.writer(log)
cmd = db.execute(
"""
SELECT ix.id, ix.conm, ix.type, ix.cik, ix.date, ix.path
FROM "index" ix
LEFT JOIN reports r ON ix.id = r.index_id
WHERE ix.type = '10-K' AND r.id IS NULL AND
CAST(strftime('%Y', DATE(ix.date)) as INT) >= {start} AND
CAST(strftime('%Y', DATE(ix.date)) as INT) <= {end}
ORDER BY ix.date DESC
"""
.format(start=start, end=end))
for batch in iter(lambda : cmd.fetchmany(batch_size), []):
to_insert = list()
for r in batch:
log_row = r
response = requests.get(r[5])
href = parse_href(response.content)
url = fix_url(href, r[5])
print(url)
filetype = mimetypes.guess_type(url)[0]
print(filetype)
filename = os.path.basename(urlparse(url).path)
print(filename)
to_insert.append((r[0], r[1], r[2], r[3], r[4], url,
filetype, filename))
logwriter.writerow(log_row)
db_insert(db, to_insert)
def parse_href(html_content):
root = to_doc(html_content)
elements = root.xpath('(//div[@id="formDiv"]//table//tr[2]/td[3]/a)')
if len(elements) == 0:
raise Exception('Unable to parse URL from index page')
href = elements[0].get('href')
return href
def fix_url(href, base_url):
path = href.replace('ix?doc=/', '')
url = urljoin(base_url, path)
return url
def to_doc(content):
try:
doc = etree.fromstring(content)
except:
doc = fromstring(content)
return doc
def db_connect():
db = sqlite3.connect('edgar_htm_idx.sqlite3')
return db
def db_insert(db: Connection, records):
c = db.cursor()
c.executemany(
'INSERT INTO reports(index_id, conm, type, cik, date, url, filetype, filename) VALUES (?, ?, ?, ?, ?, ?, ?, ?)'
, records)
db.commit()
def db_update(db: Connection, records):
c = db.cursor()
c.executemany(
"""
UPDATE reports SET
is_analyzed = ?,
has_positive_sentiment = ?,
word_count = ?,
pos_count = ?,
neg_count = ?,
pos_words = ?,
neg_words = ?
where id = ?"""
, records)
db.commit()
def db_ensure_init(db: Connection):
cur = db.cursor()
cur.execute(
"""CREATE TABLE IF NOT EXISTS "reports" (
"id" INTEGER NOT NULL,
"index_id" INTEGER UNIQUE,
"conm" TEXT,
"type" TEXT,
"cik" TEXT,
"date" TEXT,
"url" TEXT,
"filetype" TEXT,
"filename" TEXT,
"is_analyzed" INTEGER DEFAULT 0,
"has_positive_sentiment" INTEGER,
"word_count" INTEGER,
"pos_count" INTEGER,
"neg_count" INTEGER,
"pos_words" TEXT,
"neg_words" TEXT,
PRIMARY KEY("id" AUTOINCREMENT)
FOREIGN KEY (index_id) REFERENCES "index"(id)
);"""
)
@click.group()
def cli():
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import os
import click
import csv
import sqlite3
from sqlite3.dbapi2 import Connection
import requests
import mimetypes
from urllib.parse import urljoin, urlparse
from lxml.html.soupparser import fromstring
from lxml import etree
from lxml.etree import tostring
from analysis import lmdict, tone_count_with_negation_check
from parser import parse_text
@click.command()
@click.option('-s','--batch-size', 'batch_size', default=50)
def analyze(batch_size):
db = db_connect()
db_ensure_init(db)
cmd = db.execute("SELECT id, url FROM reports WHERE is_analyzed = 0")
for batch in iter(lambda: cmd.fetchmany(batch_size), []):
to_update = list()
for r in batch:
print("Analyzing: " + r[1])
response = requests.get(r[1])
text = parse_text(response.text)
print(text[0:400] + '\n[CLIPPED]')
# perform text analysis
result = tone_count_with_negation_check(lmdict, text)
has_positive_sentiment = result[1] > result[2]
# TODO: FIXME
# Here you should pass in all the variables that you want to store in the database
# Refer to "db_update" method in what order params should be passed
to_update.append((
True,
has_positive_sentiment,
result[0],
result[1],
result[2],
" ".join(result[3]),
" ".join(result[4]),
r[0]))
db_update(db, to_update)
@click.command()
@click.argument('start', nargs=1)
@click.argument('end', nargs=1)
@click.option('-s','--batch-size', 'batch_size', default=50)
def fetch_report_urls(start, end, batch_size):
"""Fetches and stores the 10-K report URLs"""
db = db_connect()
db_ensure_init(db)
with open('log.csv', 'w', newline='') as log:
logwriter = csv.writer(log)
cmd = db.execute("""
SELECT ix.id, ix.conm, ix.type, ix.cik, ix.date, ix.path
FROM "index" ix
LEFT JOIN reports r ON ix.id = r.index_id
WHERE ix.type = '10-K' AND r.id IS NULL AND
CAST(strftime('%Y', DATE(ix.date)) as INT) >= {start} AND
CAST(strftime('%Y', DATE(ix.date)) as INT) <= {end}
ORDER BY ix.date DESC
""".format(start=start, end=end))
for batch in iter(lambda: cmd.fetchmany(batch_size), []):
to_insert = list()
for r in batch:
# print(r)
log_row = r
response = requests.get(r[5])
href = parse_href(response.content)
url = fix_url(href, r[5])
print(url)
filetype = mimetypes.guess_type(url)[0]
print(filetype)
filename = os.path.basename(urlparse(url).path)
print(filename)
to_insert.append((r[0], r[1], r[2], r[3], r[4], url, filetype, filename))
logwriter.writerow(log_row)
db_insert(db, to_insert)
def parse_href(html_content):
# print(html_content)
root = to_doc(html_content)
# f = open("debug_idx.html", "wb")
# f.write(tostring(root, pretty_print=True))
# f.close()
elements = root.xpath('(//div[@id="formDiv"]//table//tr[2]/td[3]/a)')
if len(elements) == 0:
raise Exception("Unable to parse URL from index page")
href = elements[0].get('href')
return href
def fix_url(href, base_url):
# if the url links to an interactive iXBRL adjust the URL to link to the normal html
# eg. https://www.sec.gov/ix?doc=/Archives/edgar/data/1018840/000101884020000094/anf-20201031.htm
# -> https://www.sec.gov/Archives/edgar/data/1018840/000101884020000094/anf-20201031.htm
path = href.replace('ix?doc=/', '')
# a relative url needs to be joined with the base url
url = urljoin(base_url, path)
return url
def to_doc(content):
# Try to parse as XML/XHTML and fallback to soupparser
try:
doc = etree.fromstring(content)
except:
doc = fromstring(content)
return doc
def db_connect():
db = sqlite3.connect('edgar_htm_idx.sqlite3')
return db
def db_insert(db: Connection, records):
c = db.cursor()
c.executemany("INSERT INTO reports(index_id, conm, type, cik, date, url, filetype, filename) VALUES (?, ?, ?, ?, ?, ?, ?, ?)", records)
db.commit()
def db_update(db: Connection, records):
c = db.cursor()
c.executemany("""
UPDATE reports SET
is_analyzed = ?,
has_positive_sentiment = ?,
word_count = ?,
pos_count = ?,
neg_count = ?,
pos_words = ?,
neg_words = ?
where id = ?""", records)
db.commit()
def db_ensure_init(db: Connection):
cur = db.cursor()
# TODO: FIXME add any new columns you want to store in the database
cur.execute("""CREATE TABLE IF NOT EXISTS "reports" (
"id" INTEGER NOT NULL,
"index_id" INTEGER UNIQUE,
"conm" TEXT,
"type" TEXT,
"cik" TEXT,
"date" TEXT,
"url" TEXT,
"filetype" TEXT,
"filename" TEXT,
"is_analyzed" INTEGER DEFAULT 0,
"has_positive_sentiment" INTEGER,
"word_count" INTEGER,
"pos_count" INTEGER,
"neg_count" INTEGER,
"pos_words" TEXT,
"neg_words" TEXT,
PRIMARY KEY("id" AUTOINCREMENT)
FOREIGN KEY (index_id) REFERENCES "index"(id)
);""")
@click.group()
def cli():
pass
cli.add_command(fetch_report_urls)
cli.add_command(analyze)
if __name__ == '__main__':
cli()
|
flexible
|
{
"blob_id": "88e4e6647d4720d1c99f3e3438100790903921b5",
"index": 9163,
"step-1": "<mask token>\n\n\[email protected]()\[email protected]('-s', '--batch-size', 'batch_size', default=50)\ndef analyze(batch_size):\n db = db_connect()\n db_ensure_init(db)\n cmd = db.execute('SELECT id, url FROM reports WHERE is_analyzed = 0')\n for batch in iter(lambda : cmd.fetchmany(batch_size), []):\n to_update = list()\n for r in batch:\n print('Analyzing: ' + r[1])\n response = requests.get(r[1])\n text = parse_text(response.text)\n print(text[0:400] + '\\n[CLIPPED]')\n result = tone_count_with_negation_check(lmdict, text)\n has_positive_sentiment = result[1] > result[2]\n to_update.append((True, has_positive_sentiment, result[0],\n result[1], result[2], ' '.join(result[3]), ' '.join(result[\n 4]), r[0]))\n db_update(db, to_update)\n\n\n<mask token>\n\n\ndef fix_url(href, base_url):\n path = href.replace('ix?doc=/', '')\n url = urljoin(base_url, path)\n return url\n\n\n<mask token>\n\n\[email protected]()\ndef cli():\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]()\[email protected]('-s', '--batch-size', 'batch_size', default=50)\ndef analyze(batch_size):\n db = db_connect()\n db_ensure_init(db)\n cmd = db.execute('SELECT id, url FROM reports WHERE is_analyzed = 0')\n for batch in iter(lambda : cmd.fetchmany(batch_size), []):\n to_update = list()\n for r in batch:\n print('Analyzing: ' + r[1])\n response = requests.get(r[1])\n text = parse_text(response.text)\n print(text[0:400] + '\\n[CLIPPED]')\n result = tone_count_with_negation_check(lmdict, text)\n has_positive_sentiment = result[1] > result[2]\n to_update.append((True, has_positive_sentiment, result[0],\n result[1], result[2], ' '.join(result[3]), ' '.join(result[\n 4]), r[0]))\n db_update(db, to_update)\n\n\[email protected]()\[email protected]('start', nargs=1)\[email protected]('end', nargs=1)\[email protected]('-s', '--batch-size', 'batch_size', default=50)\ndef fetch_report_urls(start, end, batch_size):\n \"\"\"Fetches and stores the 10-K report URLs\"\"\"\n db = db_connect()\n db_ensure_init(db)\n with open('log.csv', 'w', newline='') as log:\n logwriter = csv.writer(log)\n cmd = db.execute(\n \"\"\"\n SELECT ix.id, ix.conm, ix.type, ix.cik, ix.date, ix.path\n FROM \"index\" ix\n LEFT JOIN reports r ON ix.id = r.index_id\n WHERE ix.type = '10-K' AND r.id IS NULL AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) >= {start} AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) <= {end}\n ORDER BY ix.date DESC\n \"\"\"\n .format(start=start, end=end))\n for batch in iter(lambda : cmd.fetchmany(batch_size), []):\n to_insert = list()\n for r in batch:\n log_row = r\n response = requests.get(r[5])\n href = parse_href(response.content)\n url = fix_url(href, r[5])\n print(url)\n filetype = mimetypes.guess_type(url)[0]\n print(filetype)\n filename = os.path.basename(urlparse(url).path)\n print(filename)\n to_insert.append((r[0], r[1], r[2], r[3], r[4], url,\n filetype, filename))\n logwriter.writerow(log_row)\n db_insert(db, to_insert)\n\n\n<mask token>\n\n\ndef fix_url(href, base_url):\n path = href.replace('ix?doc=/', '')\n url = urljoin(base_url, path)\n return url\n\n\n<mask token>\n\n\ndef db_connect():\n db = sqlite3.connect('edgar_htm_idx.sqlite3')\n return db\n\n\n<mask token>\n\n\ndef db_update(db: Connection, records):\n c = db.cursor()\n c.executemany(\n \"\"\"\n UPDATE reports SET\n is_analyzed = ?,\n has_positive_sentiment = ?,\n word_count = ?,\n pos_count = ?,\n neg_count = ?,\n pos_words = ?,\n neg_words = ?\n where id = ?\"\"\"\n , records)\n db.commit()\n\n\n<mask token>\n\n\[email protected]()\ndef cli():\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]()\[email protected]('-s', '--batch-size', 'batch_size', default=50)\ndef analyze(batch_size):\n db = db_connect()\n db_ensure_init(db)\n cmd = db.execute('SELECT id, url FROM reports WHERE is_analyzed = 0')\n for batch in iter(lambda : cmd.fetchmany(batch_size), []):\n to_update = list()\n for r in batch:\n print('Analyzing: ' + r[1])\n response = requests.get(r[1])\n text = parse_text(response.text)\n print(text[0:400] + '\\n[CLIPPED]')\n result = tone_count_with_negation_check(lmdict, text)\n has_positive_sentiment = result[1] > result[2]\n to_update.append((True, has_positive_sentiment, result[0],\n result[1], result[2], ' '.join(result[3]), ' '.join(result[\n 4]), r[0]))\n db_update(db, to_update)\n\n\[email protected]()\[email protected]('start', nargs=1)\[email protected]('end', nargs=1)\[email protected]('-s', '--batch-size', 'batch_size', default=50)\ndef fetch_report_urls(start, end, batch_size):\n \"\"\"Fetches and stores the 10-K report URLs\"\"\"\n db = db_connect()\n db_ensure_init(db)\n with open('log.csv', 'w', newline='') as log:\n logwriter = csv.writer(log)\n cmd = db.execute(\n \"\"\"\n SELECT ix.id, ix.conm, ix.type, ix.cik, ix.date, ix.path\n FROM \"index\" ix\n LEFT JOIN reports r ON ix.id = r.index_id\n WHERE ix.type = '10-K' AND r.id IS NULL AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) >= {start} AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) <= {end}\n ORDER BY ix.date DESC\n \"\"\"\n .format(start=start, end=end))\n for batch in iter(lambda : cmd.fetchmany(batch_size), []):\n to_insert = list()\n for r in batch:\n log_row = r\n response = requests.get(r[5])\n href = parse_href(response.content)\n url = fix_url(href, r[5])\n print(url)\n filetype = mimetypes.guess_type(url)[0]\n print(filetype)\n filename = os.path.basename(urlparse(url).path)\n print(filename)\n to_insert.append((r[0], r[1], r[2], r[3], r[4], url,\n filetype, filename))\n logwriter.writerow(log_row)\n db_insert(db, to_insert)\n\n\ndef parse_href(html_content):\n root = to_doc(html_content)\n elements = root.xpath('(//div[@id=\"formDiv\"]//table//tr[2]/td[3]/a)')\n if len(elements) == 0:\n raise Exception('Unable to parse URL from index page')\n href = elements[0].get('href')\n return href\n\n\ndef fix_url(href, base_url):\n path = href.replace('ix?doc=/', '')\n url = urljoin(base_url, path)\n return url\n\n\ndef to_doc(content):\n try:\n doc = etree.fromstring(content)\n except:\n doc = fromstring(content)\n return doc\n\n\ndef db_connect():\n db = sqlite3.connect('edgar_htm_idx.sqlite3')\n return db\n\n\ndef db_insert(db: Connection, records):\n c = db.cursor()\n c.executemany(\n 'INSERT INTO reports(index_id, conm, type, cik, date, url, filetype, filename) VALUES (?, ?, ?, ?, ?, ?, ?, ?)'\n , records)\n db.commit()\n\n\ndef db_update(db: Connection, records):\n c = db.cursor()\n c.executemany(\n \"\"\"\n UPDATE reports SET\n is_analyzed = ?,\n has_positive_sentiment = ?,\n word_count = ?,\n pos_count = ?,\n neg_count = ?,\n pos_words = ?,\n neg_words = ?\n where id = ?\"\"\"\n , records)\n db.commit()\n\n\n<mask token>\n\n\[email protected]()\ndef cli():\n pass\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\[email protected]()\[email protected]('-s', '--batch-size', 'batch_size', default=50)\ndef analyze(batch_size):\n db = db_connect()\n db_ensure_init(db)\n cmd = db.execute('SELECT id, url FROM reports WHERE is_analyzed = 0')\n for batch in iter(lambda : cmd.fetchmany(batch_size), []):\n to_update = list()\n for r in batch:\n print('Analyzing: ' + r[1])\n response = requests.get(r[1])\n text = parse_text(response.text)\n print(text[0:400] + '\\n[CLIPPED]')\n result = tone_count_with_negation_check(lmdict, text)\n has_positive_sentiment = result[1] > result[2]\n to_update.append((True, has_positive_sentiment, result[0],\n result[1], result[2], ' '.join(result[3]), ' '.join(result[\n 4]), r[0]))\n db_update(db, to_update)\n\n\[email protected]()\[email protected]('start', nargs=1)\[email protected]('end', nargs=1)\[email protected]('-s', '--batch-size', 'batch_size', default=50)\ndef fetch_report_urls(start, end, batch_size):\n \"\"\"Fetches and stores the 10-K report URLs\"\"\"\n db = db_connect()\n db_ensure_init(db)\n with open('log.csv', 'w', newline='') as log:\n logwriter = csv.writer(log)\n cmd = db.execute(\n \"\"\"\n SELECT ix.id, ix.conm, ix.type, ix.cik, ix.date, ix.path\n FROM \"index\" ix\n LEFT JOIN reports r ON ix.id = r.index_id\n WHERE ix.type = '10-K' AND r.id IS NULL AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) >= {start} AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) <= {end}\n ORDER BY ix.date DESC\n \"\"\"\n .format(start=start, end=end))\n for batch in iter(lambda : cmd.fetchmany(batch_size), []):\n to_insert = list()\n for r in batch:\n log_row = r\n response = requests.get(r[5])\n href = parse_href(response.content)\n url = fix_url(href, r[5])\n print(url)\n filetype = mimetypes.guess_type(url)[0]\n print(filetype)\n filename = os.path.basename(urlparse(url).path)\n print(filename)\n to_insert.append((r[0], r[1], r[2], r[3], r[4], url,\n filetype, filename))\n logwriter.writerow(log_row)\n db_insert(db, to_insert)\n\n\ndef parse_href(html_content):\n root = to_doc(html_content)\n elements = root.xpath('(//div[@id=\"formDiv\"]//table//tr[2]/td[3]/a)')\n if len(elements) == 0:\n raise Exception('Unable to parse URL from index page')\n href = elements[0].get('href')\n return href\n\n\ndef fix_url(href, base_url):\n path = href.replace('ix?doc=/', '')\n url = urljoin(base_url, path)\n return url\n\n\ndef to_doc(content):\n try:\n doc = etree.fromstring(content)\n except:\n doc = fromstring(content)\n return doc\n\n\ndef db_connect():\n db = sqlite3.connect('edgar_htm_idx.sqlite3')\n return db\n\n\ndef db_insert(db: Connection, records):\n c = db.cursor()\n c.executemany(\n 'INSERT INTO reports(index_id, conm, type, cik, date, url, filetype, filename) VALUES (?, ?, ?, ?, ?, ?, ?, ?)'\n , records)\n db.commit()\n\n\ndef db_update(db: Connection, records):\n c = db.cursor()\n c.executemany(\n \"\"\"\n UPDATE reports SET\n is_analyzed = ?,\n has_positive_sentiment = ?,\n word_count = ?,\n pos_count = ?,\n neg_count = ?,\n pos_words = ?,\n neg_words = ?\n where id = ?\"\"\"\n , records)\n db.commit()\n\n\ndef db_ensure_init(db: Connection):\n cur = db.cursor()\n cur.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS \"reports\" (\n \"id\"\tINTEGER NOT NULL,\n \"index_id\" INTEGER UNIQUE,\n \"conm\" TEXT,\n \"type\" TEXT,\n \"cik\" TEXT,\n \"date\" TEXT,\n \"url\"\tTEXT,\n \"filetype\"\tTEXT,\n \"filename\"\tTEXT,\n \"is_analyzed\"\tINTEGER DEFAULT 0,\n \"has_positive_sentiment\" INTEGER,\n \"word_count\" INTEGER,\n \"pos_count\" INTEGER,\n 
\"neg_count\" INTEGER,\n \"pos_words\" TEXT,\n \"neg_words\" TEXT,\n PRIMARY KEY(\"id\" AUTOINCREMENT)\n FOREIGN KEY (index_id) REFERENCES \"index\"(id)\n );\"\"\"\n )\n\n\[email protected]()\ndef cli():\n pass\n\n\n<mask token>\n",
"step-5": "import os\nimport click\nimport csv\nimport sqlite3\nfrom sqlite3.dbapi2 import Connection\nimport requests\nimport mimetypes\nfrom urllib.parse import urljoin, urlparse\nfrom lxml.html.soupparser import fromstring\nfrom lxml import etree\nfrom lxml.etree import tostring\nfrom analysis import lmdict, tone_count_with_negation_check\nfrom parser import parse_text\n\[email protected]()\[email protected]('-s','--batch-size', 'batch_size', default=50)\ndef analyze(batch_size):\n db = db_connect()\n db_ensure_init(db)\n\n cmd = db.execute(\"SELECT id, url FROM reports WHERE is_analyzed = 0\")\n for batch in iter(lambda: cmd.fetchmany(batch_size), []):\n to_update = list()\n for r in batch:\n print(\"Analyzing: \" + r[1])\n response = requests.get(r[1])\n\n text = parse_text(response.text)\n print(text[0:400] + '\\n[CLIPPED]')\n\n # perform text analysis\n result = tone_count_with_negation_check(lmdict, text)\n\n has_positive_sentiment = result[1] > result[2]\n\n # TODO: FIXME\n # Here you should pass in all the variables that you want to store in the database\n # Refer to \"db_update\" method in what order params should be passed\n to_update.append((\n True,\n has_positive_sentiment,\n result[0],\n result[1],\n result[2],\n \" \".join(result[3]),\n \" \".join(result[4]),\n r[0]))\n\n db_update(db, to_update)\n\n\[email protected]()\[email protected]('start', nargs=1)\[email protected]('end', nargs=1)\[email protected]('-s','--batch-size', 'batch_size', default=50)\ndef fetch_report_urls(start, end, batch_size):\n \"\"\"Fetches and stores the 10-K report URLs\"\"\"\n db = db_connect()\n db_ensure_init(db)\n\n with open('log.csv', 'w', newline='') as log:\n logwriter = csv.writer(log)\n\n cmd = db.execute(\"\"\"\n SELECT ix.id, ix.conm, ix.type, ix.cik, ix.date, ix.path\n FROM \"index\" ix\n LEFT JOIN reports r ON ix.id = r.index_id\n WHERE ix.type = '10-K' AND r.id IS NULL AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) >= {start} AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) <= {end}\n ORDER BY ix.date DESC\n \"\"\".format(start=start, end=end))\n\n for batch in iter(lambda: cmd.fetchmany(batch_size), []):\n to_insert = list()\n for r in batch:\n # print(r)\n log_row = r\n\n response = requests.get(r[5])\n href = parse_href(response.content)\n url = fix_url(href, r[5])\n print(url)\n\n filetype = mimetypes.guess_type(url)[0]\n print(filetype)\n\n filename = os.path.basename(urlparse(url).path)\n print(filename)\n\n to_insert.append((r[0], r[1], r[2], r[3], r[4], url, filetype, filename))\n\n logwriter.writerow(log_row)\n\n db_insert(db, to_insert)\n\ndef parse_href(html_content):\n # print(html_content)\n root = to_doc(html_content)\n # f = open(\"debug_idx.html\", \"wb\")\n # f.write(tostring(root, pretty_print=True))\n # f.close()\n elements = root.xpath('(//div[@id=\"formDiv\"]//table//tr[2]/td[3]/a)')\n\n if len(elements) == 0:\n raise Exception(\"Unable to parse URL from index page\")\n\n href = elements[0].get('href')\n return href\n\ndef fix_url(href, base_url):\n # if the url links to an interactive iXBRL adjust the URL to link to the normal html\n # eg. 
https://www.sec.gov/ix?doc=/Archives/edgar/data/1018840/000101884020000094/anf-20201031.htm\n # -> https://www.sec.gov/Archives/edgar/data/1018840/000101884020000094/anf-20201031.htm\n path = href.replace('ix?doc=/', '')\n # a relative url needs to be joined with the base url\n url = urljoin(base_url, path)\n return url\n\ndef to_doc(content):\n # Try to parse as XML/XHTML and fallback to soupparser\n try:\n doc = etree.fromstring(content)\n except:\n doc = fromstring(content)\n\n return doc\n\ndef db_connect():\n db = sqlite3.connect('edgar_htm_idx.sqlite3')\n return db\n\ndef db_insert(db: Connection, records):\n c = db.cursor()\n c.executemany(\"INSERT INTO reports(index_id, conm, type, cik, date, url, filetype, filename) VALUES (?, ?, ?, ?, ?, ?, ?, ?)\", records)\n db.commit()\n\ndef db_update(db: Connection, records):\n c = db.cursor()\n c.executemany(\"\"\"\n UPDATE reports SET\n is_analyzed = ?,\n has_positive_sentiment = ?,\n word_count = ?,\n pos_count = ?,\n neg_count = ?,\n pos_words = ?,\n neg_words = ?\n where id = ?\"\"\", records)\n db.commit()\n\ndef db_ensure_init(db: Connection):\n cur = db.cursor()\n # TODO: FIXME add any new columns you want to store in the database\n cur.execute(\"\"\"CREATE TABLE IF NOT EXISTS \"reports\" (\n \"id\"\tINTEGER NOT NULL,\n \"index_id\" INTEGER UNIQUE,\n \"conm\" TEXT,\n \"type\" TEXT,\n \"cik\" TEXT,\n \"date\" TEXT,\n \"url\"\tTEXT,\n \"filetype\"\tTEXT,\n \"filename\"\tTEXT,\n \"is_analyzed\"\tINTEGER DEFAULT 0,\n \"has_positive_sentiment\" INTEGER,\n \"word_count\" INTEGER,\n \"pos_count\" INTEGER,\n \"neg_count\" INTEGER,\n \"pos_words\" TEXT,\n \"neg_words\" TEXT,\n PRIMARY KEY(\"id\" AUTOINCREMENT)\n FOREIGN KEY (index_id) REFERENCES \"index\"(id)\n );\"\"\")\n\n\[email protected]()\ndef cli():\n pass\n\ncli.add_command(fetch_report_urls)\ncli.add_command(analyze)\n\nif __name__ == '__main__':\n cli()\n",
"step-ids": [
3,
6,
9,
10,
13
]
}
|
[
3,
6,
9,
10,
13
] |
from django.db import transaction
from django.forms import inlineformset_factory
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import CreateView, UpdateView
from forms.models.fund_operation import FundOperation
from forms.forms.fund_operation_forms import FundOperationForm, FundOperationLineForm, FundOperationFormSet
class FundOperationCreateView(CreateView):
model = FundOperation
template_name = "forms/fund_operation/create.html"
form_class = FundOperationForm
success_url = None
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
if self.request.POST:
data['lines'] = FundOperationFormSet(self.request.POST)
else:
data['lines'] = FundOperationFormSet()
return data
def form_valid(self, form):
context = self.get_context_data()
lines = context['lines']
with transaction.atomic():
form.instance.create_user = self.request.user
self.object = form.save()
if lines.is_valid():
lines.instance = self.object
lines.save()
return super().form_valid(form)
def get_success_url(self):
return reverse_lazy('fund_operation:fund_operation_create')
class FundOperationUpdateView(UpdateView):
    model = FundOperation
template_name = "forms/fund_operation/update.html"
form_class = FundOperationForm
success_url = None
def _get_initial_data(self):
if self.object.lines.all():
return None
initial = [
{
'body': 'प्रदेश सरकार',
},
{
'body': 'संघीय सरकार',
},
{
'body': 'स्थानीय तह',
},
{
'body': 'अन्य ब्यक्ति संस्था निकाय पदाधिकारी',
},
{
'body': 'अन्तरराष्ट्रिय गैर सरकारी संस्था',
},
{
'body': 'गैरसरकारी संस्था',
},
]
return initial
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
initial = self._get_initial_data()
if self.request.POST:
data['lines'] = FundOperationFormSet(
self.request.POST,
instance=self.object,
initial=initial
)
else:
data['lines'] = FundOperationFormSet(
instance=self.object,
initial=initial
)
data['lines'].extra = len(initial) if initial else 1
return data
def form_valid(self, form):
context = self.get_context_data()
lines = context['lines']
with transaction.atomic():
form.instance.create_user = self.request.user
self.object = form.save()
if lines.is_valid():
lines.instance = self.object
lines.save()
else:
return self.form_invalid(form, lines)
return super().form_valid(form)
def form_invalid(self, form, lines=None):
return self.render_to_response(self.get_context_data(form=form, lines=lines))
def get_success_url(self):
return reverse_lazy('fund_operation:fund_operation_update', kwargs={'pk': self.object.pk})
|
normal
|
{
"blob_id": "3c2fb3d09edab92da08ac8850f650a2fa22fad92",
"index": 8806,
"step-1": "<mask token>\n\n\nclass FundOperationCreateView(CreateView):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n return super().form_valid(form)\n <mask token>\n\n\nclass FundOperationUpdateView(UpdateView):\n model = FundOperation\n template_name = 'forms/fund_operation/update.html'\n form_class = FundOperationForm\n success_url = None\n\n def _get_initial_data(self):\n if self.object.lines.all():\n return None\n initial = [{'body': 'प्रदेश सरकार'}, {'body': 'संघीय सरकार'}, {\n 'body': 'स्थानीय तह'}, {'body':\n 'अन्य ब्यक्ति संस्था निकाय पदाधिकारी'}, {'body':\n 'अन्तरराष्ट्रिय गैर सरकारी संस्था'}, {'body': 'गैरसरकारी संस्था'}]\n return initial\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n initial = self._get_initial_data()\n if self.request.POST:\n data['lines'] = FundOperationFormSet(self.request.POST,\n instance=self.object, initial=initial)\n else:\n data['lines'] = FundOperationFormSet(instance=self.object,\n initial=initial)\n data['lines'].extra = len(initial) if initial else 1\n return data\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n else:\n return self.form_invalid(form, lines)\n return super().form_valid(form)\n\n def form_invalid(self, form, lines=None):\n return self.render_to_response(self.get_context_data(form=form,\n lines=lines))\n\n def get_success_url(self):\n return reverse_lazy('fund_operation:fund_operation_update', kwargs=\n {'pk': self.object.pk})\n",
"step-2": "<mask token>\n\n\nclass FundOperationCreateView(CreateView):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n if self.request.POST:\n data['lines'] = FundOperationFormSet(self.request.POST)\n else:\n data['lines'] = FundOperationFormSet()\n return data\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n return super().form_valid(form)\n <mask token>\n\n\nclass FundOperationUpdateView(UpdateView):\n model = FundOperation\n template_name = 'forms/fund_operation/update.html'\n form_class = FundOperationForm\n success_url = None\n\n def _get_initial_data(self):\n if self.object.lines.all():\n return None\n initial = [{'body': 'प्रदेश सरकार'}, {'body': 'संघीय सरकार'}, {\n 'body': 'स्थानीय तह'}, {'body':\n 'अन्य ब्यक्ति संस्था निकाय पदाधिकारी'}, {'body':\n 'अन्तरराष्ट्रिय गैर सरकारी संस्था'}, {'body': 'गैरसरकारी संस्था'}]\n return initial\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n initial = self._get_initial_data()\n if self.request.POST:\n data['lines'] = FundOperationFormSet(self.request.POST,\n instance=self.object, initial=initial)\n else:\n data['lines'] = FundOperationFormSet(instance=self.object,\n initial=initial)\n data['lines'].extra = len(initial) if initial else 1\n return data\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n else:\n return self.form_invalid(form, lines)\n return super().form_valid(form)\n\n def form_invalid(self, form, lines=None):\n return self.render_to_response(self.get_context_data(form=form,\n lines=lines))\n\n def get_success_url(self):\n return reverse_lazy('fund_operation:fund_operation_update', kwargs=\n {'pk': self.object.pk})\n",
"step-3": "<mask token>\n\n\nclass FundOperationCreateView(CreateView):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n if self.request.POST:\n data['lines'] = FundOperationFormSet(self.request.POST)\n else:\n data['lines'] = FundOperationFormSet()\n return data\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse_lazy('fund_operation:fund_operation_create')\n\n\nclass FundOperationUpdateView(UpdateView):\n model = FundOperation\n template_name = 'forms/fund_operation/update.html'\n form_class = FundOperationForm\n success_url = None\n\n def _get_initial_data(self):\n if self.object.lines.all():\n return None\n initial = [{'body': 'प्रदेश सरकार'}, {'body': 'संघीय सरकार'}, {\n 'body': 'स्थानीय तह'}, {'body':\n 'अन्य ब्यक्ति संस्था निकाय पदाधिकारी'}, {'body':\n 'अन्तरराष्ट्रिय गैर सरकारी संस्था'}, {'body': 'गैरसरकारी संस्था'}]\n return initial\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n initial = self._get_initial_data()\n if self.request.POST:\n data['lines'] = FundOperationFormSet(self.request.POST,\n instance=self.object, initial=initial)\n else:\n data['lines'] = FundOperationFormSet(instance=self.object,\n initial=initial)\n data['lines'].extra = len(initial) if initial else 1\n return data\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n else:\n return self.form_invalid(form, lines)\n return super().form_valid(form)\n\n def form_invalid(self, form, lines=None):\n return self.render_to_response(self.get_context_data(form=form,\n lines=lines))\n\n def get_success_url(self):\n return reverse_lazy('fund_operation:fund_operation_update', kwargs=\n {'pk': self.object.pk})\n",
"step-4": "<mask token>\n\n\nclass FundOperationCreateView(CreateView):\n model = FundOperation\n template_name = 'forms/fund_operation/create.html'\n form_class = FundOperationForm\n success_url = None\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n if self.request.POST:\n data['lines'] = FundOperationFormSet(self.request.POST)\n else:\n data['lines'] = FundOperationFormSet()\n return data\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse_lazy('fund_operation:fund_operation_create')\n\n\nclass FundOperationUpdateView(UpdateView):\n model = FundOperation\n template_name = 'forms/fund_operation/update.html'\n form_class = FundOperationForm\n success_url = None\n\n def _get_initial_data(self):\n if self.object.lines.all():\n return None\n initial = [{'body': 'प्रदेश सरकार'}, {'body': 'संघीय सरकार'}, {\n 'body': 'स्थानीय तह'}, {'body':\n 'अन्य ब्यक्ति संस्था निकाय पदाधिकारी'}, {'body':\n 'अन्तरराष्ट्रिय गैर सरकारी संस्था'}, {'body': 'गैरसरकारी संस्था'}]\n return initial\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n initial = self._get_initial_data()\n if self.request.POST:\n data['lines'] = FundOperationFormSet(self.request.POST,\n instance=self.object, initial=initial)\n else:\n data['lines'] = FundOperationFormSet(instance=self.object,\n initial=initial)\n data['lines'].extra = len(initial) if initial else 1\n return data\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n else:\n return self.form_invalid(form, lines)\n return super().form_valid(form)\n\n def form_invalid(self, form, lines=None):\n return self.render_to_response(self.get_context_data(form=form,\n lines=lines))\n\n def get_success_url(self):\n return reverse_lazy('fund_operation:fund_operation_update', kwargs=\n {'pk': self.object.pk})\n",
"step-5": "from django.db import transaction\nfrom django.forms import inlineformset_factory\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.generic import CreateView, UpdateView\nfrom forms.models.fund_operation import FundOperation\nfrom forms.forms.fund_operation_forms import FundOperationForm, FundOperationLineForm, FundOperationFormSet\n\n\nclass FundOperationCreateView(CreateView):\n model = FundOperation\n template_name = \"forms/fund_operation/create.html\"\n form_class = FundOperationForm\n success_url = None\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n if self.request.POST:\n data['lines'] = FundOperationFormSet(self.request.POST)\n else:\n data['lines'] = FundOperationFormSet()\n return data\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse_lazy('fund_operation:fund_operation_create')\n\n\nclass FundOperationUpdateView(UpdateView):\n model =FundOperation\n template_name = \"forms/fund_operation/update.html\"\n form_class = FundOperationForm\n success_url = None\n\n def _get_initial_data(self):\n if self.object.lines.all():\n return None\n\n initial = [\n {\n 'body': 'प्रदेश सरकार',\n },\n {\n 'body': 'संघीय सरकार',\n },\n {\n 'body': 'स्थानीय तह',\n },\n {\n 'body': 'अन्य ब्यक्ति संस्था निकाय पदाधिकारी',\n },\n {\n 'body': 'अन्तरराष्ट्रिय गैर सरकारी संस्था',\n },\n {\n 'body': 'गैरसरकारी संस्था',\n },\n ]\n return initial\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n\n initial = self._get_initial_data()\n if self.request.POST:\n data['lines'] = FundOperationFormSet(\n self.request.POST,\n instance=self.object,\n initial=initial\n )\n else:\n data['lines'] = FundOperationFormSet(\n instance=self.object,\n initial=initial\n )\n data['lines'].extra = len(initial) if initial else 1\n return data\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n else:\n return self.form_invalid(form, lines)\n\n return super().form_valid(form)\n\n def form_invalid(self, form, lines=None):\n return self.render_to_response(self.get_context_data(form=form, lines=lines))\n\n def get_success_url(self):\n return reverse_lazy('fund_operation:fund_operation_update', kwargs={'pk': self.object.pk})\n",
"step-ids": [
9,
10,
11,
12,
14
]
}
|
[
9,
10,
11,
12,
14
] |
#!/usr/bin/env python
# coding: utf-8
import pika
connection = pika.BlockingConnection(pika.ConnectionParameters(
host = '192.168.10.28'
))
channel = connection.channel()
channel.queue_declare(queue='hello')
channel.basic_publish(exchange='',
routing_key='hello',
body='Hello World!')
print "[x] Sent 'Hello World!'"
connection.close()
|
normal
|
{
"blob_id": "a9a60d4bee45a4012d004bacac7812160ed4241c",
"index": 4012,
"step-1": "#!/usr/bin/env python\n# coding: utf-8\n\nimport pika\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(\n host = '192.168.10.28'\n))\nchannel = connection.channel()\nchannel.queue_declare(queue='hello')\nchannel.basic_publish(exchange='',\n routing_key='hello',\n body='Hello World!')\nprint \"[x] Sent 'Hello World!\"\nconnection.close()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# list and dictionary comprehensions
# lists
my_list = [1, 2, 3, 4, 5]
new_list = []
for i in my_list:
new_list.append(i**2)
new_list_comp = [el**2 for el in my_list]
lines = [line.strip() for line in open("text.txt")]
new_list_1 = [el for el in my_list if el % 2 == 0]
str_1 = 'abc'
str_2 = 'def'
str_3 = 'gh'
new_list_2 = [i+j+k for i in str_1 for j in str_2 for k in str_3]
# dictionaries and sets
my_set = {el**2 for el in range(10)}
my_dict = {el: el**2 for el in range(5)}
print(my_dict)
my_list_of_floats = [2.4324324, 5.3243234, 6.23424]
new_list_round = [round(el, 2) for el in my_list_of_floats]
print(new_list_round)
|
normal
|
{
"blob_id": "e54eea2261517a2b15fde23c46b3fe75c0efec64",
"index": 7746,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in my_list:\n new_list.append(i ** 2)\n<mask token>\nprint(my_dict)\n<mask token>\nprint(new_list_round)\n",
"step-3": "my_list = [1, 2, 3, 4, 5]\nnew_list = []\nfor i in my_list:\n new_list.append(i ** 2)\nnew_list_comp = [(el ** 2) for el in my_list]\nlines = [line.strip() for line in open('text.txt')]\nnew_list_1 = [el for el in my_list if el % 2 == 0]\nstr_1 = 'abc'\nstr_2 = 'def'\nstr_3 = 'gh'\nnew_list_2 = [(i + j + k) for i in str_1 for j in str_2 for k in str_3]\nmy_set = {(el ** 2) for el in range(10)}\nmy_dict = {el: (el ** 2) for el in range(5)}\nprint(my_dict)\nmy_list_of_floats = [2.4324324, 5.3243234, 6.23424]\nnew_list_round = [round(el, 2) for el in my_list_of_floats]\nprint(new_list_round)\n",
"step-4": "# генераторы списков и словарей\n\n# lists\nmy_list = [1, 2, 3, 4, 5]\nnew_list = []\nfor i in my_list:\n new_list.append(i**2)\n\nnew_list_comp = [el**2 for el in my_list]\n\nlines = [line.strip() for line in open(\"text.txt\")]\n\nnew_list_1 = [el for el in my_list if el % 2 == 0]\n\nstr_1 = 'abc'\nstr_2 = 'def'\nstr_3 = 'gh'\n\nnew_list_2 = [i+j+k for i in str_1 for j in str_2 for k in str_3]\n\n# словари и множества\nmy_set = {el**2 for el in range(10)}\n\nmy_dict = {el: el**2 for el in range(5)}\nprint(my_dict)\n\nmy_list_of_floats = [2.4324324, 5.3243234, 6.23424]\n\nnew_list_round = [round(el, 2) for el in my_list_of_floats]\nprint(new_list_round)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def load_dataframe(dataset):
return pd.read_csv(dataset)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_dataframe(dataset):
return pd.read_csv(dataset)
def augment(x, y, t=2):
xs, xn = [], []
for i in range(t):
mask = y > 0
x1 = x[mask].copy()
for c in range(200):
val = x1[:, [c, c + 200, c + 400]]
np.random.shuffle(val)
x1[:, [c, c + 200, c + 400]] = val
xs.append(x1)
for i in range(t // 2):
mask = y == 0
x1 = x[mask].copy()
for c in range(200):
val = x1[:, [c, c + 200, c + 400]]
np.random.shuffle(val)
x1[:, [c, c + 200, c + 400]] = val
xn.append(x1)
xs = np.vstack(xs)
xn = np.vstack(xn)
ys = np.ones(xs.shape[0])
yn = np.zeros(xn.shape[0])
x = np.vstack([x, xs, xn])
y = np.concatenate([y, ys, yn])
return x, y
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_dataframe(dataset):
return pd.read_csv(dataset)
def augment(x, y, t=2):
xs, xn = [], []
for i in range(t):
mask = y > 0
x1 = x[mask].copy()
for c in range(200):
val = x1[:, [c, c + 200, c + 400]]
np.random.shuffle(val)
x1[:, [c, c + 200, c + 400]] = val
xs.append(x1)
for i in range(t // 2):
mask = y == 0
x1 = x[mask].copy()
for c in range(200):
val = x1[:, [c, c + 200, c + 400]]
np.random.shuffle(val)
x1[:, [c, c + 200, c + 400]] = val
xn.append(x1)
xs = np.vstack(xs)
xn = np.vstack(xn)
ys = np.ones(xs.shape[0])
yn = np.zeros(xn.shape[0])
x = np.vstack([x, xs, xn])
y = np.concatenate([y, ys, yn])
return x, y
if __name__ == '__main__':
gc.enable()
pd.set_option('max_rows', None)
pd.set_option('max_columns', None)
warnings.simplefilter('ignore', UserWarning)
top_folder = './output'
today = datetime.today()
now = today.strftime('%m%d-%H%M')
log_name = now + '.txt'
sys.stdout = Logger(path.join(top_folder, log_name))
seed_np = 1011
np.random.seed(seed_np)
print('numpy seed: {}'.format(seed_np))
start = time.time()
with multiprocessing.Pool() as pool:
train, test = pool.map(load_dataframe, ['./input/train.csv',
'./input/test.csv'])
df_test = test.drop(columns=['ID_code']).values
unique_samples = []
unique_count = np.zeros_like(df_test)
for feature in tqdm(range(df_test.shape[1])):
_, index_, count_ = np.unique(df_test[:, feature], return_counts=
True, return_index=True)
unique_count[index_[count_ == 1], feature] += 1
idx_score = np.argwhere(np.sum(unique_count, axis=1) > 0)[:, 0]
idx_synthetic = np.argwhere(np.sum(unique_count, axis=1) == 0)[:, 0]
synthetic = test.loc[idx_synthetic]
test = test.loc[idx_score]
raw = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
len_train = len(train)
col_var = list(raw.columns[2:])
mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)
for col in tqdm(col_var):
cnt = raw[col].value_counts()
val = cnt[cnt == 1].index
mask.loc[np.isin(raw[col], val), col] = 0
col_repeat = [(col + '_repeat_2') for col in col_var]
raw[col_repeat] = raw[col_var][mask.astype(bool)]
mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)
for col in tqdm(col_var):
cnt = raw[col].value_counts()
val = cnt[np.isin(cnt, [1, 2])].index
mask.loc[np.isin(raw[col], val), col] = 0
col_repeat = [(col + '_repeat_3') for col in col_var]
raw[col_repeat] = raw[col_var][mask.astype(bool)]
raw = pd.concat([raw, synthetic], axis=0, sort=False, ignore_index=True)
print('data: {}'.format(raw.shape))
print('elapsed time: {:.1f} min'.format((time.time() - start) / 60))
feats = [col for col in raw.columns.values if col not in ['ID_code',
'target']]
train = raw[:len_train]
test = raw[len_train:].copy()
x_train = train[feats]
y_train = train['target']
x_test = test[feats]
print('trn_x: {}'.format(x_train.shape))
print('x_test: {}'.format(x_test.shape))
param = {'objective': 'binary', 'boosting': 'gbdt', 'metric': 'auc',
'verbosity': -1, 'n_jobs': 11, 'random_state': 1993,
'learning_rate': 0.01, 'num_leaves': 8, 'max_depth': -1,
'feature_fraction': 0.05, 'bagging_freq': 5, 'bagging_fraction':
0.4, 'min_data_in_leaf': 80, 'min_sum_hessian_in_leaf': 10.0}
print('model params:\n{}'.format(pd.Series(list(param.values()), index=
list(param.keys()))))
seed_fold = 26
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed_fold)
print('StratifiedKFold seed: {}'.format(seed_fold))
round_max = 30000
round_early_stopping = 3000
print('num_round: {}'.format(round_max))
print('early_stopping_round: {}'.format(round_early_stopping))
oof = np.zeros(len(x_train))
predictions = np.zeros(len(x_test))
start = time.time()
for fold_, (trn_idx, val_idx) in enumerate(folds.split(x_train.values,
y_train.values)):
print('fold n°{}'.format(fold_))
trn_x, trn_y = x_train.iloc[trn_idx], y_train.iloc[trn_idx]
val_x, val_y = x_train.iloc[val_idx], y_train.iloc[val_idx]
N = 5
for i in range(N):
X_t, y_t = augment(trn_x.values, trn_y.values)
X_t = pd.DataFrame(X_t, columns=feats)
trn_data = lgb.Dataset(X_t, label=y_t)
val_data = lgb.Dataset(val_x, label=val_y)
evals_result = {}
clf = lgb.train(param, trn_data, round_max, valid_sets=[
trn_data, val_data], early_stopping_rounds=
round_early_stopping, verbose_eval=1000, evals_result=
evals_result)
oof[val_idx] += clf.predict(val_x, num_iteration=clf.best_iteration
) / N
predictions += clf.predict(x_test, num_iteration=clf.best_iteration
) / folds.n_splits / N
fold_score = roc_auc_score(val_y, oof[val_idx])
print('fold {} auc score: {:.5f}'.format(fold_, fold_score))
cv_score = roc_auc_score(y_train, oof)
print('elapsed time: {:.1f} min'.format((time.time() - start) / 60))
print('auc score: {:.5f}'.format(cv_score))
sub_folder = path.join(top_folder, 'cv_' + now + '_' + str(np.round(
cv_score, 5)))
makedirs(sub_folder, exist_ok=True)
test['target'] = predictions
test[['ID_code', 'target']].to_csv(path.join(sub_folder,
'submission.csv'), index=False)
raw['oof'] = np.concatenate([oof, predictions], axis=0)
raw[['ID_code', 'oof']].to_csv(path.join(sub_folder, 'oof.csv'), index=
False)
<|reserved_special_token_1|>
import gc
import sys
import time
import warnings
import multiprocessing
import numpy as np
import pandas as pd
import lightgbm as lgb
from os import path, makedirs
from tqdm import tqdm
from utils import Logger
from datetime import datetime
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
def load_dataframe(dataset):
return pd.read_csv(dataset)
def augment(x, y, t=2):
xs, xn = [], []
for i in range(t):
mask = y > 0
x1 = x[mask].copy()
for c in range(200):
val = x1[:, [c, c + 200, c + 400]]
np.random.shuffle(val)
x1[:, [c, c + 200, c + 400]] = val
xs.append(x1)
for i in range(t // 2):
mask = y == 0
x1 = x[mask].copy()
for c in range(200):
val = x1[:, [c, c + 200, c + 400]]
np.random.shuffle(val)
x1[:, [c, c + 200, c + 400]] = val
xn.append(x1)
xs = np.vstack(xs)
xn = np.vstack(xn)
ys = np.ones(xs.shape[0])
yn = np.zeros(xn.shape[0])
x = np.vstack([x, xs, xn])
y = np.concatenate([y, ys, yn])
return x, y
if __name__ == '__main__':
gc.enable()
pd.set_option('max_rows', None)
pd.set_option('max_columns', None)
warnings.simplefilter('ignore', UserWarning)
top_folder = './output'
today = datetime.today()
now = today.strftime('%m%d-%H%M')
log_name = now + '.txt'
sys.stdout = Logger(path.join(top_folder, log_name))
seed_np = 1011
np.random.seed(seed_np)
print('numpy seed: {}'.format(seed_np))
start = time.time()
with multiprocessing.Pool() as pool:
train, test = pool.map(load_dataframe, ['./input/train.csv',
'./input/test.csv'])
df_test = test.drop(columns=['ID_code']).values
unique_samples = []
unique_count = np.zeros_like(df_test)
for feature in tqdm(range(df_test.shape[1])):
_, index_, count_ = np.unique(df_test[:, feature], return_counts=
True, return_index=True)
unique_count[index_[count_ == 1], feature] += 1
idx_score = np.argwhere(np.sum(unique_count, axis=1) > 0)[:, 0]
idx_synthetic = np.argwhere(np.sum(unique_count, axis=1) == 0)[:, 0]
synthetic = test.loc[idx_synthetic]
test = test.loc[idx_score]
raw = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
len_train = len(train)
col_var = list(raw.columns[2:])
mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)
for col in tqdm(col_var):
cnt = raw[col].value_counts()
val = cnt[cnt == 1].index
mask.loc[np.isin(raw[col], val), col] = 0
col_repeat = [(col + '_repeat_2') for col in col_var]
raw[col_repeat] = raw[col_var][mask.astype(bool)]
mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)
for col in tqdm(col_var):
cnt = raw[col].value_counts()
val = cnt[np.isin(cnt, [1, 2])].index
mask.loc[np.isin(raw[col], val), col] = 0
col_repeat = [(col + '_repeat_3') for col in col_var]
raw[col_repeat] = raw[col_var][mask.astype(bool)]
raw = pd.concat([raw, synthetic], axis=0, sort=False, ignore_index=True)
print('data: {}'.format(raw.shape))
print('elapsed time: {:.1f} min'.format((time.time() - start) / 60))
feats = [col for col in raw.columns.values if col not in ['ID_code',
'target']]
train = raw[:len_train]
test = raw[len_train:].copy()
x_train = train[feats]
y_train = train['target']
x_test = test[feats]
print('trn_x: {}'.format(x_train.shape))
print('x_test: {}'.format(x_test.shape))
param = {'objective': 'binary', 'boosting': 'gbdt', 'metric': 'auc',
'verbosity': -1, 'n_jobs': 11, 'random_state': 1993,
'learning_rate': 0.01, 'num_leaves': 8, 'max_depth': -1,
'feature_fraction': 0.05, 'bagging_freq': 5, 'bagging_fraction':
0.4, 'min_data_in_leaf': 80, 'min_sum_hessian_in_leaf': 10.0}
print('model params:\n{}'.format(pd.Series(list(param.values()), index=
list(param.keys()))))
seed_fold = 26
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed_fold)
print('StratifiedKFold seed: {}'.format(seed_fold))
round_max = 30000
round_early_stopping = 3000
print('num_round: {}'.format(round_max))
print('early_stopping_round: {}'.format(round_early_stopping))
oof = np.zeros(len(x_train))
predictions = np.zeros(len(x_test))
start = time.time()
for fold_, (trn_idx, val_idx) in enumerate(folds.split(x_train.values,
y_train.values)):
print('fold n°{}'.format(fold_))
trn_x, trn_y = x_train.iloc[trn_idx], y_train.iloc[trn_idx]
val_x, val_y = x_train.iloc[val_idx], y_train.iloc[val_idx]
N = 5
for i in range(N):
X_t, y_t = augment(trn_x.values, trn_y.values)
X_t = pd.DataFrame(X_t, columns=feats)
trn_data = lgb.Dataset(X_t, label=y_t)
val_data = lgb.Dataset(val_x, label=val_y)
evals_result = {}
clf = lgb.train(param, trn_data, round_max, valid_sets=[
trn_data, val_data], early_stopping_rounds=
round_early_stopping, verbose_eval=1000, evals_result=
evals_result)
oof[val_idx] += clf.predict(val_x, num_iteration=clf.best_iteration
) / N
predictions += clf.predict(x_test, num_iteration=clf.best_iteration
) / folds.n_splits / N
fold_score = roc_auc_score(val_y, oof[val_idx])
print('fold {} auc score: {:.5f}'.format(fold_, fold_score))
cv_score = roc_auc_score(y_train, oof)
print('elapsed time: {:.1f} min'.format((time.time() - start) / 60))
print('auc score: {:.5f}'.format(cv_score))
sub_folder = path.join(top_folder, 'cv_' + now + '_' + str(np.round(
cv_score, 5)))
makedirs(sub_folder, exist_ok=True)
test['target'] = predictions
test[['ID_code', 'target']].to_csv(path.join(sub_folder,
'submission.csv'), index=False)
raw['oof'] = np.concatenate([oof, predictions], axis=0)
raw[['ID_code', 'oof']].to_csv(path.join(sub_folder, 'oof.csv'), index=
False)
<|reserved_special_token_1|>
import gc
import sys
import time
import warnings
import multiprocessing
import numpy as np
import pandas as pd
import lightgbm as lgb
from os import path, makedirs
from tqdm import tqdm
from utils import Logger
from datetime import datetime
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
# ======================================================================= Method
def load_dataframe(dataset):
return pd.read_csv(dataset)
def augment(x, y, t=2):
xs, xn = [], []
for i in range(t):
mask = y > 0
x1 = x[mask].copy()
for c in range(200):
val = x1[:, [c, c+200, c+400]]
np.random.shuffle(val)
x1[:, [c, c+200, c+400]] = val
xs.append(x1)
for i in range(t//2):
mask = y == 0
x1 = x[mask].copy()
for c in range(200):
val = x1[:, [c, c+200, c+400]]
np.random.shuffle(val)
x1[:, [c, c+200, c+400]] = val
xn.append(x1)
xs = np.vstack(xs)
xn = np.vstack(xn)
ys = np.ones(xs.shape[0])
yn = np.zeros(xn.shape[0])
x = np.vstack([x, xs, xn])
y = np.concatenate([y, ys, yn])
return x, y
# ======================================================================= Main
if __name__ == '__main__':
gc.enable()
pd.set_option('max_rows', None)
pd.set_option('max_columns', None)
warnings.simplefilter('ignore', UserWarning)
# =================================================================== Params
top_folder = './output'
today = datetime.today()
now = today.strftime('%m%d-%H%M')
log_name = now + '.txt'
sys.stdout = Logger(path.join(top_folder, log_name))
seed_np = 1011
np.random.seed(seed_np)
print('numpy seed: {}'.format(seed_np))
# =================================================================== Load Data
start = time.time()
with multiprocessing.Pool() as pool:
train, test = pool.map(load_dataframe, ['./input/train.csv', './input/test.csv'])
# === fake sample
df_test = test.drop(columns=['ID_code']).values
unique_samples = []
unique_count = np.zeros_like(df_test)
for feature in tqdm(range(df_test.shape[1])):
_, index_, count_ = np.unique(df_test[:, feature], return_counts=True, return_index=True)
unique_count[index_[count_ == 1], feature] += 1
idx_score = np.argwhere(np.sum(unique_count, axis=1) > 0)[:, 0]
idx_synthetic = np.argwhere(np.sum(unique_count, axis=1) == 0)[:, 0]
synthetic = test.loc[idx_synthetic]
test = test.loc[idx_score]
raw = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
# ============================== Extra Feature
len_train = len(train)
col_var = list(raw.columns[2:])
# === replace value(frequency=1) to NA
mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)
for col in tqdm(col_var):
cnt = raw[col].value_counts()
val = cnt[cnt == 1].index
mask.loc[np.isin(raw[col], val), col] = 0
col_repeat = [col + '_repeat_2' for col in col_var]
raw[col_repeat] = raw[col_var][mask.astype(bool)]
# === replace value(frequency=1/2) to NA
mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)
for col in tqdm(col_var):
cnt = raw[col].value_counts()
val = cnt[np.isin(cnt, [1, 2])].index
mask.loc[np.isin(raw[col], val), col] = 0
col_repeat = [col + '_repeat_3' for col in col_var]
raw[col_repeat] = raw[col_var][mask.astype(bool)]
raw = pd.concat([raw, synthetic], axis=0, sort=False, ignore_index=True)
# === logging
print('data: {}'.format(raw.shape))
print('elapsed time: {:.1f} min'.format((time.time() - start)/60))
# =================================================================== PreProcess
feats = [col for col in raw.columns.values if col not in ['ID_code', 'target']]
# =================================================================== Model
train = raw[:len_train]
test = raw[len_train:].copy()
x_train = train[feats]
y_train = train['target']
x_test = test[feats]
print('trn_x: {}'.format(x_train.shape))
print('x_test: {}'.format(x_test.shape))
param = {
'objective': 'binary',
'boosting': 'gbdt',
'metric': 'auc',
'verbosity': -1,
'n_jobs': 11,
'random_state': 1993,
'learning_rate': 0.01,
'num_leaves': 8,
'max_depth': -1,
'feature_fraction': 0.05,
'bagging_freq': 5,
'bagging_fraction': 0.4,
'min_data_in_leaf': 80,
'min_sum_hessian_in_leaf': 10.0,
}
print('model params:\n{}'.format(pd.Series(list(param.values()), index=list(param.keys()))))
seed_fold = 26
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed_fold)
print('StratifiedKFold seed: {}'.format(seed_fold))
round_max = 30000
round_early_stopping = 3000
print('num_round: {}'.format(round_max))
print('early_stopping_round: {}'.format(round_early_stopping))
# === training
oof = np.zeros(len(x_train))
predictions = np.zeros(len(x_test))
start = time.time()
for fold_, (trn_idx, val_idx) in enumerate(folds.split(x_train.values, y_train.values)):
print("fold n°{}".format(fold_))
trn_x, trn_y = x_train.iloc[trn_idx], y_train.iloc[trn_idx]
val_x, val_y = x_train.iloc[val_idx], y_train.iloc[val_idx]
N = 5
for i in range(N):
X_t, y_t = augment(trn_x.values, trn_y.values)
X_t = pd.DataFrame(X_t, columns=feats)
trn_data = lgb.Dataset(X_t, label=y_t)
val_data = lgb.Dataset(val_x, label=val_y)
evals_result = {}
clf = lgb.train(param,
trn_data,
round_max,
valid_sets=[trn_data, val_data],
early_stopping_rounds=round_early_stopping,
verbose_eval=1000,
evals_result=evals_result)
oof[val_idx] += clf.predict(val_x, num_iteration=clf.best_iteration) / N
predictions += clf.predict(x_test, num_iteration=clf.best_iteration) / folds.n_splits / N
fold_score = roc_auc_score(val_y, oof[val_idx])
print('fold {} auc score: {:.5f}'.format(fold_, fold_score))
cv_score = roc_auc_score(y_train, oof)
print('elapsed time: {:.1f} min'.format((time.time() - start)/60))
print('auc score: {:.5f}'.format(cv_score))
# =================================================================== Saving File
sub_folder = path.join(top_folder, 'cv_' + now + '_' + str(np.round(cv_score, 5)))
makedirs(sub_folder, exist_ok=True)
test['target'] = predictions
test[['ID_code', 'target']].to_csv(path.join(sub_folder, 'submission.csv'), index=False)
raw['oof'] = np.concatenate([oof, predictions], axis=0)
raw[['ID_code', 'oof']].to_csv(path.join(sub_folder, 'oof.csv'), index=False)
|
flexible
|
{
"blob_id": "74c875d00c665aabbcad4e23e6059c3445d5e7bd",
"index": 1597,
"step-1": "<mask token>\n\n\ndef load_dataframe(dataset):\n return pd.read_csv(dataset)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_dataframe(dataset):\n return pd.read_csv(dataset)\n\n\ndef augment(x, y, t=2):\n xs, xn = [], []\n for i in range(t):\n mask = y > 0\n x1 = x[mask].copy()\n for c in range(200):\n val = x1[:, [c, c + 200, c + 400]]\n np.random.shuffle(val)\n x1[:, [c, c + 200, c + 400]] = val\n xs.append(x1)\n for i in range(t // 2):\n mask = y == 0\n x1 = x[mask].copy()\n for c in range(200):\n val = x1[:, [c, c + 200, c + 400]]\n np.random.shuffle(val)\n x1[:, [c, c + 200, c + 400]] = val\n xn.append(x1)\n xs = np.vstack(xs)\n xn = np.vstack(xn)\n ys = np.ones(xs.shape[0])\n yn = np.zeros(xn.shape[0])\n x = np.vstack([x, xs, xn])\n y = np.concatenate([y, ys, yn])\n return x, y\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef load_dataframe(dataset):\n return pd.read_csv(dataset)\n\n\ndef augment(x, y, t=2):\n xs, xn = [], []\n for i in range(t):\n mask = y > 0\n x1 = x[mask].copy()\n for c in range(200):\n val = x1[:, [c, c + 200, c + 400]]\n np.random.shuffle(val)\n x1[:, [c, c + 200, c + 400]] = val\n xs.append(x1)\n for i in range(t // 2):\n mask = y == 0\n x1 = x[mask].copy()\n for c in range(200):\n val = x1[:, [c, c + 200, c + 400]]\n np.random.shuffle(val)\n x1[:, [c, c + 200, c + 400]] = val\n xn.append(x1)\n xs = np.vstack(xs)\n xn = np.vstack(xn)\n ys = np.ones(xs.shape[0])\n yn = np.zeros(xn.shape[0])\n x = np.vstack([x, xs, xn])\n y = np.concatenate([y, ys, yn])\n return x, y\n\n\nif __name__ == '__main__':\n gc.enable()\n pd.set_option('max_rows', None)\n pd.set_option('max_columns', None)\n warnings.simplefilter('ignore', UserWarning)\n top_folder = './output'\n today = datetime.today()\n now = today.strftime('%m%d-%H%M')\n log_name = now + '.txt'\n sys.stdout = Logger(path.join(top_folder, log_name))\n seed_np = 1011\n np.random.seed(seed_np)\n print('numpy seed: {}'.format(seed_np))\n start = time.time()\n with multiprocessing.Pool() as pool:\n train, test = pool.map(load_dataframe, ['./input/train.csv',\n './input/test.csv'])\n df_test = test.drop(columns=['ID_code']).values\n unique_samples = []\n unique_count = np.zeros_like(df_test)\n for feature in tqdm(range(df_test.shape[1])):\n _, index_, count_ = np.unique(df_test[:, feature], return_counts=\n True, return_index=True)\n unique_count[index_[count_ == 1], feature] += 1\n idx_score = np.argwhere(np.sum(unique_count, axis=1) > 0)[:, 0]\n idx_synthetic = np.argwhere(np.sum(unique_count, axis=1) == 0)[:, 0]\n synthetic = test.loc[idx_synthetic]\n test = test.loc[idx_score]\n raw = pd.concat([train, test], axis=0, sort=False, ignore_index=True)\n len_train = len(train)\n col_var = list(raw.columns[2:])\n mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)\n for col in tqdm(col_var):\n cnt = raw[col].value_counts()\n val = cnt[cnt == 1].index\n mask.loc[np.isin(raw[col], val), col] = 0\n col_repeat = [(col + '_repeat_2') for col in col_var]\n raw[col_repeat] = raw[col_var][mask.astype(bool)]\n mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)\n for col in tqdm(col_var):\n cnt = raw[col].value_counts()\n val = cnt[np.isin(cnt, [1, 2])].index\n mask.loc[np.isin(raw[col], val), col] = 0\n col_repeat = [(col + '_repeat_3') for col in col_var]\n raw[col_repeat] = raw[col_var][mask.astype(bool)]\n raw = pd.concat([raw, synthetic], axis=0, sort=False, ignore_index=True)\n print('data: {}'.format(raw.shape))\n print('elapsed time: {:.1f} min'.format((time.time() - start) / 60))\n feats = [col for col in raw.columns.values if col not in ['ID_code',\n 'target']]\n train = raw[:len_train]\n test = raw[len_train:].copy()\n x_train = train[feats]\n y_train = train['target']\n x_test = test[feats]\n print('trn_x: {}'.format(x_train.shape))\n print('x_test: {}'.format(x_test.shape))\n param = {'objective': 'binary', 'boosting': 'gbdt', 'metric': 'auc',\n 'verbosity': -1, 'n_jobs': 11, 'random_state': 1993,\n 'learning_rate': 0.01, 'num_leaves': 8, 'max_depth': -1,\n 'feature_fraction': 0.05, 'bagging_freq': 5, 'bagging_fraction': \n 0.4, 'min_data_in_leaf': 80, 'min_sum_hessian_in_leaf': 10.0}\n print('model params:\\n{}'.format(pd.Series(list(param.values()), index=\n list(param.keys()))))\n seed_fold = 26\n folds = StratifiedKFold(n_splits=5, shuffle=True, 
random_state=seed_fold)\n print('StratifiedKFold seed: {}'.format(seed_fold))\n round_max = 30000\n round_early_stopping = 3000\n print('num_round: {}'.format(round_max))\n print('early_stopping_round: {}'.format(round_early_stopping))\n oof = np.zeros(len(x_train))\n predictions = np.zeros(len(x_test))\n start = time.time()\n for fold_, (trn_idx, val_idx) in enumerate(folds.split(x_train.values,\n y_train.values)):\n print('fold n°{}'.format(fold_))\n trn_x, trn_y = x_train.iloc[trn_idx], y_train.iloc[trn_idx]\n val_x, val_y = x_train.iloc[val_idx], y_train.iloc[val_idx]\n N = 5\n for i in range(N):\n X_t, y_t = augment(trn_x.values, trn_y.values)\n X_t = pd.DataFrame(X_t, columns=feats)\n trn_data = lgb.Dataset(X_t, label=y_t)\n val_data = lgb.Dataset(val_x, label=val_y)\n evals_result = {}\n clf = lgb.train(param, trn_data, round_max, valid_sets=[\n trn_data, val_data], early_stopping_rounds=\n round_early_stopping, verbose_eval=1000, evals_result=\n evals_result)\n oof[val_idx] += clf.predict(val_x, num_iteration=clf.best_iteration\n ) / N\n predictions += clf.predict(x_test, num_iteration=clf.best_iteration\n ) / folds.n_splits / N\n fold_score = roc_auc_score(val_y, oof[val_idx])\n print('fold {} auc score: {:.5f}'.format(fold_, fold_score))\n cv_score = roc_auc_score(y_train, oof)\n print('elapsed time: {:.1f} min'.format((time.time() - start) / 60))\n print('auc score: {:.5f}'.format(cv_score))\n sub_folder = path.join(top_folder, 'cv_' + now + '_' + str(np.round(\n cv_score, 5)))\n makedirs(sub_folder, exist_ok=True)\n test['target'] = predictions\n test[['ID_code', 'target']].to_csv(path.join(sub_folder,\n 'submission.csv'), index=False)\n raw['oof'] = np.concatenate([oof, predictions], axis=0)\n raw[['ID_code', 'oof']].to_csv(path.join(sub_folder, 'oof.csv'), index=\n False)\n",
"step-4": "import gc\nimport sys\nimport time\nimport warnings\nimport multiprocessing\nimport numpy as np\nimport pandas as pd\nimport lightgbm as lgb\nfrom os import path, makedirs\nfrom tqdm import tqdm\nfrom utils import Logger\nfrom datetime import datetime\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import StratifiedKFold\n\n\ndef load_dataframe(dataset):\n return pd.read_csv(dataset)\n\n\ndef augment(x, y, t=2):\n xs, xn = [], []\n for i in range(t):\n mask = y > 0\n x1 = x[mask].copy()\n for c in range(200):\n val = x1[:, [c, c + 200, c + 400]]\n np.random.shuffle(val)\n x1[:, [c, c + 200, c + 400]] = val\n xs.append(x1)\n for i in range(t // 2):\n mask = y == 0\n x1 = x[mask].copy()\n for c in range(200):\n val = x1[:, [c, c + 200, c + 400]]\n np.random.shuffle(val)\n x1[:, [c, c + 200, c + 400]] = val\n xn.append(x1)\n xs = np.vstack(xs)\n xn = np.vstack(xn)\n ys = np.ones(xs.shape[0])\n yn = np.zeros(xn.shape[0])\n x = np.vstack([x, xs, xn])\n y = np.concatenate([y, ys, yn])\n return x, y\n\n\nif __name__ == '__main__':\n gc.enable()\n pd.set_option('max_rows', None)\n pd.set_option('max_columns', None)\n warnings.simplefilter('ignore', UserWarning)\n top_folder = './output'\n today = datetime.today()\n now = today.strftime('%m%d-%H%M')\n log_name = now + '.txt'\n sys.stdout = Logger(path.join(top_folder, log_name))\n seed_np = 1011\n np.random.seed(seed_np)\n print('numpy seed: {}'.format(seed_np))\n start = time.time()\n with multiprocessing.Pool() as pool:\n train, test = pool.map(load_dataframe, ['./input/train.csv',\n './input/test.csv'])\n df_test = test.drop(columns=['ID_code']).values\n unique_samples = []\n unique_count = np.zeros_like(df_test)\n for feature in tqdm(range(df_test.shape[1])):\n _, index_, count_ = np.unique(df_test[:, feature], return_counts=\n True, return_index=True)\n unique_count[index_[count_ == 1], feature] += 1\n idx_score = np.argwhere(np.sum(unique_count, axis=1) > 0)[:, 0]\n idx_synthetic = np.argwhere(np.sum(unique_count, axis=1) == 0)[:, 0]\n synthetic = test.loc[idx_synthetic]\n test = test.loc[idx_score]\n raw = pd.concat([train, test], axis=0, sort=False, ignore_index=True)\n len_train = len(train)\n col_var = list(raw.columns[2:])\n mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)\n for col in tqdm(col_var):\n cnt = raw[col].value_counts()\n val = cnt[cnt == 1].index\n mask.loc[np.isin(raw[col], val), col] = 0\n col_repeat = [(col + '_repeat_2') for col in col_var]\n raw[col_repeat] = raw[col_var][mask.astype(bool)]\n mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)\n for col in tqdm(col_var):\n cnt = raw[col].value_counts()\n val = cnt[np.isin(cnt, [1, 2])].index\n mask.loc[np.isin(raw[col], val), col] = 0\n col_repeat = [(col + '_repeat_3') for col in col_var]\n raw[col_repeat] = raw[col_var][mask.astype(bool)]\n raw = pd.concat([raw, synthetic], axis=0, sort=False, ignore_index=True)\n print('data: {}'.format(raw.shape))\n print('elapsed time: {:.1f} min'.format((time.time() - start) / 60))\n feats = [col for col in raw.columns.values if col not in ['ID_code',\n 'target']]\n train = raw[:len_train]\n test = raw[len_train:].copy()\n x_train = train[feats]\n y_train = train['target']\n x_test = test[feats]\n print('trn_x: {}'.format(x_train.shape))\n print('x_test: {}'.format(x_test.shape))\n param = {'objective': 'binary', 'boosting': 'gbdt', 'metric': 'auc',\n 'verbosity': -1, 'n_jobs': 11, 'random_state': 1993,\n 'learning_rate': 0.01, 'num_leaves': 8, 
'max_depth': -1,\n 'feature_fraction': 0.05, 'bagging_freq': 5, 'bagging_fraction': \n 0.4, 'min_data_in_leaf': 80, 'min_sum_hessian_in_leaf': 10.0}\n print('model params:\\n{}'.format(pd.Series(list(param.values()), index=\n list(param.keys()))))\n seed_fold = 26\n folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed_fold)\n print('StratifiedKFold seed: {}'.format(seed_fold))\n round_max = 30000\n round_early_stopping = 3000\n print('num_round: {}'.format(round_max))\n print('early_stopping_round: {}'.format(round_early_stopping))\n oof = np.zeros(len(x_train))\n predictions = np.zeros(len(x_test))\n start = time.time()\n for fold_, (trn_idx, val_idx) in enumerate(folds.split(x_train.values,\n y_train.values)):\n print('fold n°{}'.format(fold_))\n trn_x, trn_y = x_train.iloc[trn_idx], y_train.iloc[trn_idx]\n val_x, val_y = x_train.iloc[val_idx], y_train.iloc[val_idx]\n N = 5\n for i in range(N):\n X_t, y_t = augment(trn_x.values, trn_y.values)\n X_t = pd.DataFrame(X_t, columns=feats)\n trn_data = lgb.Dataset(X_t, label=y_t)\n val_data = lgb.Dataset(val_x, label=val_y)\n evals_result = {}\n clf = lgb.train(param, trn_data, round_max, valid_sets=[\n trn_data, val_data], early_stopping_rounds=\n round_early_stopping, verbose_eval=1000, evals_result=\n evals_result)\n oof[val_idx] += clf.predict(val_x, num_iteration=clf.best_iteration\n ) / N\n predictions += clf.predict(x_test, num_iteration=clf.best_iteration\n ) / folds.n_splits / N\n fold_score = roc_auc_score(val_y, oof[val_idx])\n print('fold {} auc score: {:.5f}'.format(fold_, fold_score))\n cv_score = roc_auc_score(y_train, oof)\n print('elapsed time: {:.1f} min'.format((time.time() - start) / 60))\n print('auc score: {:.5f}'.format(cv_score))\n sub_folder = path.join(top_folder, 'cv_' + now + '_' + str(np.round(\n cv_score, 5)))\n makedirs(sub_folder, exist_ok=True)\n test['target'] = predictions\n test[['ID_code', 'target']].to_csv(path.join(sub_folder,\n 'submission.csv'), index=False)\n raw['oof'] = np.concatenate([oof, predictions], axis=0)\n raw[['ID_code', 'oof']].to_csv(path.join(sub_folder, 'oof.csv'), index=\n False)\n",
"step-5": "import gc\r\nimport sys\r\nimport time\r\nimport warnings\r\nimport multiprocessing\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport lightgbm as lgb\r\n\r\nfrom os import path, makedirs\r\nfrom tqdm import tqdm\r\nfrom utils import Logger\r\nfrom datetime import datetime\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom sklearn.model_selection import StratifiedKFold\r\n\r\n\r\n# ======================================================================= Method\r\ndef load_dataframe(dataset):\r\n return pd.read_csv(dataset)\r\n\r\n\r\ndef augment(x, y, t=2):\r\n xs, xn = [], []\r\n for i in range(t):\r\n mask = y > 0\r\n x1 = x[mask].copy()\r\n for c in range(200):\r\n val = x1[:, [c, c+200, c+400]]\r\n np.random.shuffle(val)\r\n x1[:, [c, c+200, c+400]] = val\r\n xs.append(x1)\r\n\r\n for i in range(t//2):\r\n mask = y == 0\r\n x1 = x[mask].copy()\r\n for c in range(200):\r\n val = x1[:, [c, c+200, c+400]]\r\n np.random.shuffle(val)\r\n x1[:, [c, c+200, c+400]] = val\r\n xn.append(x1)\r\n\r\n xs = np.vstack(xs)\r\n xn = np.vstack(xn)\r\n ys = np.ones(xs.shape[0])\r\n yn = np.zeros(xn.shape[0])\r\n x = np.vstack([x, xs, xn])\r\n y = np.concatenate([y, ys, yn])\r\n return x, y\r\n\r\n\r\n# ======================================================================= Main\r\nif __name__ == '__main__':\r\n gc.enable()\r\n pd.set_option('max_rows', None)\r\n pd.set_option('max_columns', None)\r\n warnings.simplefilter('ignore', UserWarning)\r\n\r\n # =================================================================== Params\r\n top_folder = './output'\r\n\r\n today = datetime.today()\r\n now = today.strftime('%m%d-%H%M')\r\n log_name = now + '.txt'\r\n sys.stdout = Logger(path.join(top_folder, log_name))\r\n\r\n seed_np = 1011\r\n np.random.seed(seed_np)\r\n print('numpy seed: {}'.format(seed_np))\r\n\r\n # =================================================================== Load Data\r\n start = time.time()\r\n with multiprocessing.Pool() as pool:\r\n train, test = pool.map(load_dataframe, ['./input/train.csv', './input/test.csv'])\r\n\r\n # === fake sample\r\n df_test = test.drop(columns=['ID_code']).values\r\n\r\n unique_samples = []\r\n unique_count = np.zeros_like(df_test)\r\n for feature in tqdm(range(df_test.shape[1])):\r\n _, index_, count_ = np.unique(df_test[:, feature], return_counts=True, return_index=True)\r\n unique_count[index_[count_ == 1], feature] += 1\r\n\r\n idx_score = np.argwhere(np.sum(unique_count, axis=1) > 0)[:, 0]\r\n idx_synthetic = np.argwhere(np.sum(unique_count, axis=1) == 0)[:, 0]\r\n\r\n synthetic = test.loc[idx_synthetic]\r\n test = test.loc[idx_score]\r\n\r\n raw = pd.concat([train, test], axis=0, sort=False, ignore_index=True)\r\n\r\n # ============================== Extra Feature\r\n len_train = len(train)\r\n col_var = list(raw.columns[2:])\r\n\r\n # === replace value(frequency=1) to NA\r\n mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)\r\n for col in tqdm(col_var):\r\n cnt = raw[col].value_counts()\r\n val = cnt[cnt == 1].index\r\n mask.loc[np.isin(raw[col], val), col] = 0\r\n col_repeat = [col + '_repeat_2' for col in col_var]\r\n raw[col_repeat] = raw[col_var][mask.astype(bool)]\r\n\r\n # === replace value(frequency=1/2) to NA\r\n mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)\r\n for col in tqdm(col_var):\r\n cnt = raw[col].value_counts()\r\n val = cnt[np.isin(cnt, [1, 2])].index\r\n mask.loc[np.isin(raw[col], val), col] = 0\r\n col_repeat = [col + '_repeat_3' for col in col_var]\r\n 
raw[col_repeat] = raw[col_var][mask.astype(bool)]\r\n\r\n raw = pd.concat([raw, synthetic], axis=0, sort=False, ignore_index=True)\r\n\r\n # === logging\r\n print('data: {}'.format(raw.shape))\r\n print('elapsed time: {:.1f} min'.format((time.time() - start)/60))\r\n\r\n # =================================================================== PreProcess\r\n feats = [col for col in raw.columns.values if col not in ['ID_code', 'target']]\r\n\r\n # =================================================================== Model\r\n train = raw[:len_train]\r\n test = raw[len_train:].copy()\r\n\r\n x_train = train[feats]\r\n y_train = train['target']\r\n x_test = test[feats]\r\n\r\n print('trn_x: {}'.format(x_train.shape))\r\n print('x_test: {}'.format(x_test.shape))\r\n\r\n param = {\r\n 'objective': 'binary',\r\n 'boosting': 'gbdt',\r\n 'metric': 'auc',\r\n 'verbosity': -1,\r\n 'n_jobs': 11,\r\n 'random_state': 1993,\r\n 'learning_rate': 0.01,\r\n\r\n 'num_leaves': 8,\r\n 'max_depth': -1,\r\n 'feature_fraction': 0.05,\r\n 'bagging_freq': 5,\r\n 'bagging_fraction': 0.4,\r\n 'min_data_in_leaf': 80,\r\n 'min_sum_hessian_in_leaf': 10.0,\r\n }\r\n print('model params:\\n{}'.format(pd.Series(list(param.values()), index=list(param.keys()))))\r\n\r\n seed_fold = 26\r\n folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed_fold)\r\n print('StratifiedKFold seed: {}'.format(seed_fold))\r\n\r\n round_max = 30000\r\n round_early_stopping = 3000\r\n print('num_round: {}'.format(round_max))\r\n print('early_stopping_round: {}'.format(round_early_stopping))\r\n\r\n # === training\r\n oof = np.zeros(len(x_train))\r\n predictions = np.zeros(len(x_test))\r\n\r\n start = time.time()\r\n for fold_, (trn_idx, val_idx) in enumerate(folds.split(x_train.values, y_train.values)):\r\n print(\"fold n°{}\".format(fold_))\r\n\r\n trn_x, trn_y = x_train.iloc[trn_idx], y_train.iloc[trn_idx]\r\n val_x, val_y = x_train.iloc[val_idx], y_train.iloc[val_idx]\r\n\r\n N = 5\r\n for i in range(N):\r\n X_t, y_t = augment(trn_x.values, trn_y.values)\r\n X_t = pd.DataFrame(X_t, columns=feats)\r\n\r\n trn_data = lgb.Dataset(X_t, label=y_t)\r\n val_data = lgb.Dataset(val_x, label=val_y)\r\n\r\n evals_result = {}\r\n clf = lgb.train(param,\r\n trn_data,\r\n round_max,\r\n valid_sets=[trn_data, val_data],\r\n early_stopping_rounds=round_early_stopping,\r\n verbose_eval=1000,\r\n evals_result=evals_result)\r\n\r\n oof[val_idx] += clf.predict(val_x, num_iteration=clf.best_iteration) / N\r\n predictions += clf.predict(x_test, num_iteration=clf.best_iteration) / folds.n_splits / N\r\n\r\n fold_score = roc_auc_score(val_y, oof[val_idx])\r\n print('fold {} auc score: {:.5f}'.format(fold_, fold_score))\r\n\r\n cv_score = roc_auc_score(y_train, oof)\r\n print('elapsed time: {:.1f} min'.format((time.time() - start)/60))\r\n print('auc score: {:.5f}'.format(cv_score))\r\n\r\n # =================================================================== Saving File\r\n sub_folder = path.join(top_folder, 'cv_' + now + '_' + str(np.round(cv_score, 5)))\r\n makedirs(sub_folder, exist_ok=True)\r\n\r\n test['target'] = predictions\r\n test[['ID_code', 'target']].to_csv(path.join(sub_folder, 'submission.csv'), index=False)\r\n\r\n raw['oof'] = np.concatenate([oof, predictions], axis=0)\r\n raw[['ID_code', 'oof']].to_csv(path.join(sub_folder, 'oof.csv'), index=False)\r\n\r\n\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def create_parser(parser_creator=None):
parser = make_parser(parser_creator=parser_creator, formatter_class=
argparse.RawDescriptionHelpFormatter, description=
'Train a reinforcement learning agent.', epilog=EXAMPLE_USAGE)
parser.add_argument('--ray-address', default=None, type=str, help=
'Connect to an existing Ray cluster at this address instead of starting a new one.'
)
parser.add_argument('--ray-num-cpus', default=None, type=int, help=
'--num-cpus to use if starting a new cluster.')
parser.add_argument('--ray-num-gpus', default=None, type=int, help=
'--num-gpus to use if starting a new cluster.')
parser.add_argument('--ray-num-nodes', default=None, type=int, help=
'Emulate multiple cluster nodes for debugging.')
parser.add_argument('--ray-redis-max-memory', default=None, type=int,
help='--redis-max-memory to use if starting a new cluster.')
parser.add_argument('--ray-memory', default=None, type=int, help=
'--memory to use if starting a new cluster.')
parser.add_argument('--ray-object-store-memory', default=None, type=int,
help='--object-store-memory to use if starting a new cluster.')
parser.add_argument('--experiment-name', default='default', type=str,
help='Name of the subdirectory under `local_dir` to put results in.')
parser.add_argument('--local-dir', default=DEFAULT_RESULTS_DIR, type=
str, help=
"Local dir to save training results to. Defaults to '{}'.".format(
DEFAULT_RESULTS_DIR))
parser.add_argument('--upload-dir', default='', type=str, help=
'Optional URI to sync training results to (e.g. s3://bucket).')
parser.add_argument('-v', action='store_true', help=
'Whether to use INFO level logging.')
parser.add_argument('-vv', action='store_true', help=
'Whether to use DEBUG level logging.')
parser.add_argument('--resume', action='store_true', help=
'Whether to attempt to resume previous Tune experiments.')
parser.add_argument('--torch', action='store_true', help=
'Whether to use PyTorch (instead of tf) as the DL framework.')
parser.add_argument('--eager', action='store_true', help=
'Whether to attempt to enable TF eager execution.')
parser.add_argument('--trace', action='store_true', help=
'Whether to attempt to enable tracing for eager mode.')
parser.add_argument('--log-flatland-stats', action='store_true',
default=True, help=
'Whether to log additional flatland specfic metrics such as percentage complete or normalized score.'
)
parser.add_argument('-e', '--eval', action='store_true', help=
'Whether to run evaluation. Default evaluation config is default.yaml to use custom evaluation config set (eval_generator:high_eval) under configs'
)
parser.add_argument('--bind-all', action='store_true', default=False,
help=
'Whether to expose on network (binding on all network interfaces).')
parser.add_argument('--env', default=None, type=str, help=
'The gym environment to use.')
parser.add_argument('--queue-trials', action='store_true', help=
'Whether to queue trials when the cluster does not currently have enough resources to launch one. This should be set to True when running on an autoscaling cluster to enable automatic scale-up.'
)
parser.add_argument('-f', '--config-file', default=None, type=str, help
=
'If specified, use config options from this file. Note that this overrides any trial-specific options set via flags above.'
)
return parser
<|reserved_special_token_1|>
<|reserved_special_token_0|>
EXAMPLE_USAGE = """
Training example:
python ./train.py --run DQN --env CartPole-v0 --no-log-flatland-stats
Training with Config:
python ./train.py -f experiments/flatland_random_sparse_small/global_obs/ppo.yaml
Note that -f overrides all other trial-specific command-line options.
"""
def create_parser(parser_creator=None):
parser = make_parser(parser_creator=parser_creator, formatter_class=
argparse.RawDescriptionHelpFormatter, description=
'Train a reinforcement learning agent.', epilog=EXAMPLE_USAGE)
parser.add_argument('--ray-address', default=None, type=str, help=
'Connect to an existing Ray cluster at this address instead of starting a new one.'
)
parser.add_argument('--ray-num-cpus', default=None, type=int, help=
'--num-cpus to use if starting a new cluster.')
parser.add_argument('--ray-num-gpus', default=None, type=int, help=
'--num-gpus to use if starting a new cluster.')
parser.add_argument('--ray-num-nodes', default=None, type=int, help=
'Emulate multiple cluster nodes for debugging.')
parser.add_argument('--ray-redis-max-memory', default=None, type=int,
help='--redis-max-memory to use if starting a new cluster.')
parser.add_argument('--ray-memory', default=None, type=int, help=
'--memory to use if starting a new cluster.')
parser.add_argument('--ray-object-store-memory', default=None, type=int,
help='--object-store-memory to use if starting a new cluster.')
parser.add_argument('--experiment-name', default='default', type=str,
help='Name of the subdirectory under `local_dir` to put results in.')
parser.add_argument('--local-dir', default=DEFAULT_RESULTS_DIR, type=
str, help=
"Local dir to save training results to. Defaults to '{}'.".format(
DEFAULT_RESULTS_DIR))
parser.add_argument('--upload-dir', default='', type=str, help=
'Optional URI to sync training results to (e.g. s3://bucket).')
parser.add_argument('-v', action='store_true', help=
'Whether to use INFO level logging.')
parser.add_argument('-vv', action='store_true', help=
'Whether to use DEBUG level logging.')
parser.add_argument('--resume', action='store_true', help=
'Whether to attempt to resume previous Tune experiments.')
parser.add_argument('--torch', action='store_true', help=
'Whether to use PyTorch (instead of tf) as the DL framework.')
parser.add_argument('--eager', action='store_true', help=
'Whether to attempt to enable TF eager execution.')
parser.add_argument('--trace', action='store_true', help=
'Whether to attempt to enable tracing for eager mode.')
parser.add_argument('--log-flatland-stats', action='store_true',
default=True, help=
'Whether to log additional flatland specfic metrics such as percentage complete or normalized score.'
)
parser.add_argument('-e', '--eval', action='store_true', help=
'Whether to run evaluation. Default evaluation config is default.yaml to use custom evaluation config set (eval_generator:high_eval) under configs'
)
parser.add_argument('--bind-all', action='store_true', default=False,
help=
'Whether to expose on network (binding on all network interfaces).')
parser.add_argument('--env', default=None, type=str, help=
'The gym environment to use.')
parser.add_argument('--queue-trials', action='store_true', help=
'Whether to queue trials when the cluster does not currently have enough resources to launch one. This should be set to True when running on an autoscaling cluster to enable automatic scale-up.'
)
parser.add_argument('-f', '--config-file', default=None, type=str, help
=
'If specified, use config options from this file. Note that this overrides any trial-specific options set via flags above.'
)
return parser
<|reserved_special_token_1|>
import argparse
from ray.tune.config_parser import make_parser
from ray.tune.result import DEFAULT_RESULTS_DIR
EXAMPLE_USAGE = """
Training example:
python ./train.py --run DQN --env CartPole-v0 --no-log-flatland-stats
Training with Config:
python ./train.py -f experiments/flatland_random_sparse_small/global_obs/ppo.yaml
Note that -f overrides all other trial-specific command-line options.
"""
def create_parser(parser_creator=None):
parser = make_parser(parser_creator=parser_creator, formatter_class=
argparse.RawDescriptionHelpFormatter, description=
'Train a reinforcement learning agent.', epilog=EXAMPLE_USAGE)
parser.add_argument('--ray-address', default=None, type=str, help=
'Connect to an existing Ray cluster at this address instead of starting a new one.'
)
parser.add_argument('--ray-num-cpus', default=None, type=int, help=
'--num-cpus to use if starting a new cluster.')
parser.add_argument('--ray-num-gpus', default=None, type=int, help=
'--num-gpus to use if starting a new cluster.')
parser.add_argument('--ray-num-nodes', default=None, type=int, help=
'Emulate multiple cluster nodes for debugging.')
parser.add_argument('--ray-redis-max-memory', default=None, type=int,
help='--redis-max-memory to use if starting a new cluster.')
parser.add_argument('--ray-memory', default=None, type=int, help=
'--memory to use if starting a new cluster.')
parser.add_argument('--ray-object-store-memory', default=None, type=int,
help='--object-store-memory to use if starting a new cluster.')
parser.add_argument('--experiment-name', default='default', type=str,
help='Name of the subdirectory under `local_dir` to put results in.')
parser.add_argument('--local-dir', default=DEFAULT_RESULTS_DIR, type=
str, help=
"Local dir to save training results to. Defaults to '{}'.".format(
DEFAULT_RESULTS_DIR))
parser.add_argument('--upload-dir', default='', type=str, help=
'Optional URI to sync training results to (e.g. s3://bucket).')
parser.add_argument('-v', action='store_true', help=
'Whether to use INFO level logging.')
parser.add_argument('-vv', action='store_true', help=
'Whether to use DEBUG level logging.')
parser.add_argument('--resume', action='store_true', help=
'Whether to attempt to resume previous Tune experiments.')
parser.add_argument('--torch', action='store_true', help=
'Whether to use PyTorch (instead of tf) as the DL framework.')
parser.add_argument('--eager', action='store_true', help=
'Whether to attempt to enable TF eager execution.')
parser.add_argument('--trace', action='store_true', help=
'Whether to attempt to enable tracing for eager mode.')
parser.add_argument('--log-flatland-stats', action='store_true',
default=True, help=
'Whether to log additional flatland specfic metrics such as percentage complete or normalized score.'
)
parser.add_argument('-e', '--eval', action='store_true', help=
'Whether to run evaluation. Default evaluation config is default.yaml to use custom evaluation config set (eval_generator:high_eval) under configs'
)
parser.add_argument('--bind-all', action='store_true', default=False,
help=
'Whether to expose on network (binding on all network interfaces).')
parser.add_argument('--env', default=None, type=str, help=
'The gym environment to use.')
parser.add_argument('--queue-trials', action='store_true', help=
'Whether to queue trials when the cluster does not currently have enough resources to launch one. This should be set to True when running on an autoscaling cluster to enable automatic scale-up.'
)
parser.add_argument('-f', '--config-file', default=None, type=str, help
=
'If specified, use config options from this file. Note that this overrides any trial-specific options set via flags above.'
)
return parser
<|reserved_special_token_1|>
import argparse
from ray.tune.config_parser import make_parser
from ray.tune.result import DEFAULT_RESULTS_DIR
EXAMPLE_USAGE = """
Training example:
python ./train.py --run DQN --env CartPole-v0 --no-log-flatland-stats
Training with Config:
python ./train.py -f experiments/flatland_random_sparse_small/global_obs/ppo.yaml
Note that -f overrides all other trial-specific command-line options.
"""
def create_parser(parser_creator=None):
parser = make_parser(
parser_creator=parser_creator,
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Train a reinforcement learning agent.",
epilog=EXAMPLE_USAGE)
# See also the base parser definition in ray/tune/config_parser.py
parser.add_argument(
"--ray-address",
default=None,
type=str,
help="Connect to an existing Ray cluster at this address instead "
"of starting a new one.")
parser.add_argument(
"--ray-num-cpus",
default=None,
type=int,
help="--num-cpus to use if starting a new cluster.")
parser.add_argument(
"--ray-num-gpus",
default=None,
type=int,
help="--num-gpus to use if starting a new cluster.")
parser.add_argument(
"--ray-num-nodes",
default=None,
type=int,
help="Emulate multiple cluster nodes for debugging.")
parser.add_argument(
"--ray-redis-max-memory",
default=None,
type=int,
help="--redis-max-memory to use if starting a new cluster.")
parser.add_argument(
"--ray-memory",
default=None,
type=int,
help="--memory to use if starting a new cluster.")
parser.add_argument(
"--ray-object-store-memory",
default=None,
type=int,
help="--object-store-memory to use if starting a new cluster.")
parser.add_argument(
"--experiment-name",
default="default",
type=str,
help="Name of the subdirectory under `local_dir` to put results in.")
parser.add_argument(
"--local-dir",
default=DEFAULT_RESULTS_DIR,
type=str,
help="Local dir to save training results to. Defaults to '{}'.".format(
DEFAULT_RESULTS_DIR))
parser.add_argument(
"--upload-dir",
default="",
type=str,
help="Optional URI to sync training results to (e.g. s3://bucket).")
parser.add_argument(
"-v", action="store_true", help="Whether to use INFO level logging.")
parser.add_argument(
"-vv", action="store_true", help="Whether to use DEBUG level logging.")
parser.add_argument(
"--resume",
action="store_true",
help="Whether to attempt to resume previous Tune experiments.")
parser.add_argument(
"--torch",
action="store_true",
help="Whether to use PyTorch (instead of tf) as the DL framework.")
parser.add_argument(
"--eager",
action="store_true",
help="Whether to attempt to enable TF eager execution.")
parser.add_argument(
"--trace",
action="store_true",
help="Whether to attempt to enable tracing for eager mode.")
parser.add_argument(
"--log-flatland-stats",
action="store_true",
default=True,
help="Whether to log additional flatland specfic metrics such as percentage complete or normalized score.")
parser.add_argument(
"-e",
"--eval",
action="store_true",
help="Whether to run evaluation. Default evaluation config is default.yaml "
"to use custom evaluation config set (eval_generator:high_eval) under configs")
parser.add_argument(
"--bind-all",
action="store_true",
default=False,
help="Whether to expose on network (binding on all network interfaces).")
parser.add_argument(
"--env", default=None, type=str, help="The gym environment to use.")
parser.add_argument(
"--queue-trials",
action="store_true",
help=(
"Whether to queue trials when the cluster does not currently have "
"enough resources to launch one. This should be set to True when "
"running on an autoscaling cluster to enable automatic scale-up."))
parser.add_argument(
"-f",
"--config-file",
default=None,
type=str,
help="If specified, use config options from this file. Note that this "
"overrides any trial-specific options set via flags above.")
return parser
|
flexible
|
{
"blob_id": "79a8ff0000f3be79a62d693ed6bae7480673d970",
"index": 6075,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_parser(parser_creator=None):\n parser = make_parser(parser_creator=parser_creator, formatter_class=\n argparse.RawDescriptionHelpFormatter, description=\n 'Train a reinforcement learning agent.', epilog=EXAMPLE_USAGE)\n parser.add_argument('--ray-address', default=None, type=str, help=\n 'Connect to an existing Ray cluster at this address instead of starting a new one.'\n )\n parser.add_argument('--ray-num-cpus', default=None, type=int, help=\n '--num-cpus to use if starting a new cluster.')\n parser.add_argument('--ray-num-gpus', default=None, type=int, help=\n '--num-gpus to use if starting a new cluster.')\n parser.add_argument('--ray-num-nodes', default=None, type=int, help=\n 'Emulate multiple cluster nodes for debugging.')\n parser.add_argument('--ray-redis-max-memory', default=None, type=int,\n help='--redis-max-memory to use if starting a new cluster.')\n parser.add_argument('--ray-memory', default=None, type=int, help=\n '--memory to use if starting a new cluster.')\n parser.add_argument('--ray-object-store-memory', default=None, type=int,\n help='--object-store-memory to use if starting a new cluster.')\n parser.add_argument('--experiment-name', default='default', type=str,\n help='Name of the subdirectory under `local_dir` to put results in.')\n parser.add_argument('--local-dir', default=DEFAULT_RESULTS_DIR, type=\n str, help=\n \"Local dir to save training results to. Defaults to '{}'.\".format(\n DEFAULT_RESULTS_DIR))\n parser.add_argument('--upload-dir', default='', type=str, help=\n 'Optional URI to sync training results to (e.g. s3://bucket).')\n parser.add_argument('-v', action='store_true', help=\n 'Whether to use INFO level logging.')\n parser.add_argument('-vv', action='store_true', help=\n 'Whether to use DEBUG level logging.')\n parser.add_argument('--resume', action='store_true', help=\n 'Whether to attempt to resume previous Tune experiments.')\n parser.add_argument('--torch', action='store_true', help=\n 'Whether to use PyTorch (instead of tf) as the DL framework.')\n parser.add_argument('--eager', action='store_true', help=\n 'Whether to attempt to enable TF eager execution.')\n parser.add_argument('--trace', action='store_true', help=\n 'Whether to attempt to enable tracing for eager mode.')\n parser.add_argument('--log-flatland-stats', action='store_true',\n default=True, help=\n 'Whether to log additional flatland specfic metrics such as percentage complete or normalized score.'\n )\n parser.add_argument('-e', '--eval', action='store_true', help=\n 'Whether to run evaluation. Default evaluation config is default.yaml to use custom evaluation config set (eval_generator:high_eval) under configs'\n )\n parser.add_argument('--bind-all', action='store_true', default=False,\n help=\n 'Whether to expose on network (binding on all network interfaces).')\n parser.add_argument('--env', default=None, type=str, help=\n 'The gym environment to use.')\n parser.add_argument('--queue-trials', action='store_true', help=\n 'Whether to queue trials when the cluster does not currently have enough resources to launch one. This should be set to True when running on an autoscaling cluster to enable automatic scale-up.'\n )\n parser.add_argument('-f', '--config-file', default=None, type=str, help\n =\n 'If specified, use config options from this file. Note that this overrides any trial-specific options set via flags above.'\n )\n return parser\n",
"step-3": "<mask token>\nEXAMPLE_USAGE = \"\"\"\nTraining example:\n python ./train.py --run DQN --env CartPole-v0 --no-log-flatland-stats\n\nTraining with Config:\n python ./train.py -f experiments/flatland_random_sparse_small/global_obs/ppo.yaml\n\n\nNote that -f overrides all other trial-specific command-line options.\n\"\"\"\n\n\ndef create_parser(parser_creator=None):\n parser = make_parser(parser_creator=parser_creator, formatter_class=\n argparse.RawDescriptionHelpFormatter, description=\n 'Train a reinforcement learning agent.', epilog=EXAMPLE_USAGE)\n parser.add_argument('--ray-address', default=None, type=str, help=\n 'Connect to an existing Ray cluster at this address instead of starting a new one.'\n )\n parser.add_argument('--ray-num-cpus', default=None, type=int, help=\n '--num-cpus to use if starting a new cluster.')\n parser.add_argument('--ray-num-gpus', default=None, type=int, help=\n '--num-gpus to use if starting a new cluster.')\n parser.add_argument('--ray-num-nodes', default=None, type=int, help=\n 'Emulate multiple cluster nodes for debugging.')\n parser.add_argument('--ray-redis-max-memory', default=None, type=int,\n help='--redis-max-memory to use if starting a new cluster.')\n parser.add_argument('--ray-memory', default=None, type=int, help=\n '--memory to use if starting a new cluster.')\n parser.add_argument('--ray-object-store-memory', default=None, type=int,\n help='--object-store-memory to use if starting a new cluster.')\n parser.add_argument('--experiment-name', default='default', type=str,\n help='Name of the subdirectory under `local_dir` to put results in.')\n parser.add_argument('--local-dir', default=DEFAULT_RESULTS_DIR, type=\n str, help=\n \"Local dir to save training results to. Defaults to '{}'.\".format(\n DEFAULT_RESULTS_DIR))\n parser.add_argument('--upload-dir', default='', type=str, help=\n 'Optional URI to sync training results to (e.g. s3://bucket).')\n parser.add_argument('-v', action='store_true', help=\n 'Whether to use INFO level logging.')\n parser.add_argument('-vv', action='store_true', help=\n 'Whether to use DEBUG level logging.')\n parser.add_argument('--resume', action='store_true', help=\n 'Whether to attempt to resume previous Tune experiments.')\n parser.add_argument('--torch', action='store_true', help=\n 'Whether to use PyTorch (instead of tf) as the DL framework.')\n parser.add_argument('--eager', action='store_true', help=\n 'Whether to attempt to enable TF eager execution.')\n parser.add_argument('--trace', action='store_true', help=\n 'Whether to attempt to enable tracing for eager mode.')\n parser.add_argument('--log-flatland-stats', action='store_true',\n default=True, help=\n 'Whether to log additional flatland specfic metrics such as percentage complete or normalized score.'\n )\n parser.add_argument('-e', '--eval', action='store_true', help=\n 'Whether to run evaluation. Default evaluation config is default.yaml to use custom evaluation config set (eval_generator:high_eval) under configs'\n )\n parser.add_argument('--bind-all', action='store_true', default=False,\n help=\n 'Whether to expose on network (binding on all network interfaces).')\n parser.add_argument('--env', default=None, type=str, help=\n 'The gym environment to use.')\n parser.add_argument('--queue-trials', action='store_true', help=\n 'Whether to queue trials when the cluster does not currently have enough resources to launch one. 
This should be set to True when running on an autoscaling cluster to enable automatic scale-up.'\n )\n parser.add_argument('-f', '--config-file', default=None, type=str, help\n =\n 'If specified, use config options from this file. Note that this overrides any trial-specific options set via flags above.'\n )\n return parser\n",
"step-4": "import argparse\nfrom ray.tune.config_parser import make_parser\nfrom ray.tune.result import DEFAULT_RESULTS_DIR\nEXAMPLE_USAGE = \"\"\"\nTraining example:\n python ./train.py --run DQN --env CartPole-v0 --no-log-flatland-stats\n\nTraining with Config:\n python ./train.py -f experiments/flatland_random_sparse_small/global_obs/ppo.yaml\n\n\nNote that -f overrides all other trial-specific command-line options.\n\"\"\"\n\n\ndef create_parser(parser_creator=None):\n parser = make_parser(parser_creator=parser_creator, formatter_class=\n argparse.RawDescriptionHelpFormatter, description=\n 'Train a reinforcement learning agent.', epilog=EXAMPLE_USAGE)\n parser.add_argument('--ray-address', default=None, type=str, help=\n 'Connect to an existing Ray cluster at this address instead of starting a new one.'\n )\n parser.add_argument('--ray-num-cpus', default=None, type=int, help=\n '--num-cpus to use if starting a new cluster.')\n parser.add_argument('--ray-num-gpus', default=None, type=int, help=\n '--num-gpus to use if starting a new cluster.')\n parser.add_argument('--ray-num-nodes', default=None, type=int, help=\n 'Emulate multiple cluster nodes for debugging.')\n parser.add_argument('--ray-redis-max-memory', default=None, type=int,\n help='--redis-max-memory to use if starting a new cluster.')\n parser.add_argument('--ray-memory', default=None, type=int, help=\n '--memory to use if starting a new cluster.')\n parser.add_argument('--ray-object-store-memory', default=None, type=int,\n help='--object-store-memory to use if starting a new cluster.')\n parser.add_argument('--experiment-name', default='default', type=str,\n help='Name of the subdirectory under `local_dir` to put results in.')\n parser.add_argument('--local-dir', default=DEFAULT_RESULTS_DIR, type=\n str, help=\n \"Local dir to save training results to. Defaults to '{}'.\".format(\n DEFAULT_RESULTS_DIR))\n parser.add_argument('--upload-dir', default='', type=str, help=\n 'Optional URI to sync training results to (e.g. s3://bucket).')\n parser.add_argument('-v', action='store_true', help=\n 'Whether to use INFO level logging.')\n parser.add_argument('-vv', action='store_true', help=\n 'Whether to use DEBUG level logging.')\n parser.add_argument('--resume', action='store_true', help=\n 'Whether to attempt to resume previous Tune experiments.')\n parser.add_argument('--torch', action='store_true', help=\n 'Whether to use PyTorch (instead of tf) as the DL framework.')\n parser.add_argument('--eager', action='store_true', help=\n 'Whether to attempt to enable TF eager execution.')\n parser.add_argument('--trace', action='store_true', help=\n 'Whether to attempt to enable tracing for eager mode.')\n parser.add_argument('--log-flatland-stats', action='store_true',\n default=True, help=\n 'Whether to log additional flatland specfic metrics such as percentage complete or normalized score.'\n )\n parser.add_argument('-e', '--eval', action='store_true', help=\n 'Whether to run evaluation. Default evaluation config is default.yaml to use custom evaluation config set (eval_generator:high_eval) under configs'\n )\n parser.add_argument('--bind-all', action='store_true', default=False,\n help=\n 'Whether to expose on network (binding on all network interfaces).')\n parser.add_argument('--env', default=None, type=str, help=\n 'The gym environment to use.')\n parser.add_argument('--queue-trials', action='store_true', help=\n 'Whether to queue trials when the cluster does not currently have enough resources to launch one. 
This should be set to True when running on an autoscaling cluster to enable automatic scale-up.'\n )\n parser.add_argument('-f', '--config-file', default=None, type=str, help\n =\n 'If specified, use config options from this file. Note that this overrides any trial-specific options set via flags above.'\n )\n return parser\n",
"step-5": "import argparse\n\nfrom ray.tune.config_parser import make_parser\nfrom ray.tune.result import DEFAULT_RESULTS_DIR\n\nEXAMPLE_USAGE = \"\"\"\nTraining example:\n python ./train.py --run DQN --env CartPole-v0 --no-log-flatland-stats\n\nTraining with Config:\n python ./train.py -f experiments/flatland_random_sparse_small/global_obs/ppo.yaml\n\n\nNote that -f overrides all other trial-specific command-line options.\n\"\"\"\n\n\ndef create_parser(parser_creator=None):\n parser = make_parser(\n parser_creator=parser_creator,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"Train a reinforcement learning agent.\",\n epilog=EXAMPLE_USAGE)\n\n # See also the base parser definition in ray/tune/config_parser.py\n parser.add_argument(\n \"--ray-address\",\n default=None,\n type=str,\n help=\"Connect to an existing Ray cluster at this address instead \"\n \"of starting a new one.\")\n parser.add_argument(\n \"--ray-num-cpus\",\n default=None,\n type=int,\n help=\"--num-cpus to use if starting a new cluster.\")\n parser.add_argument(\n \"--ray-num-gpus\",\n default=None,\n type=int,\n help=\"--num-gpus to use if starting a new cluster.\")\n parser.add_argument(\n \"--ray-num-nodes\",\n default=None,\n type=int,\n help=\"Emulate multiple cluster nodes for debugging.\")\n parser.add_argument(\n \"--ray-redis-max-memory\",\n default=None,\n type=int,\n help=\"--redis-max-memory to use if starting a new cluster.\")\n parser.add_argument(\n \"--ray-memory\",\n default=None,\n type=int,\n help=\"--memory to use if starting a new cluster.\")\n parser.add_argument(\n \"--ray-object-store-memory\",\n default=None,\n type=int,\n help=\"--object-store-memory to use if starting a new cluster.\")\n parser.add_argument(\n \"--experiment-name\",\n default=\"default\",\n type=str,\n help=\"Name of the subdirectory under `local_dir` to put results in.\")\n parser.add_argument(\n \"--local-dir\",\n default=DEFAULT_RESULTS_DIR,\n type=str,\n help=\"Local dir to save training results to. Defaults to '{}'.\".format(\n DEFAULT_RESULTS_DIR))\n parser.add_argument(\n \"--upload-dir\",\n default=\"\",\n type=str,\n help=\"Optional URI to sync training results to (e.g. s3://bucket).\")\n parser.add_argument(\n \"-v\", action=\"store_true\", help=\"Whether to use INFO level logging.\")\n parser.add_argument(\n \"-vv\", action=\"store_true\", help=\"Whether to use DEBUG level logging.\")\n parser.add_argument(\n \"--resume\",\n action=\"store_true\",\n help=\"Whether to attempt to resume previous Tune experiments.\")\n parser.add_argument(\n \"--torch\",\n action=\"store_true\",\n help=\"Whether to use PyTorch (instead of tf) as the DL framework.\")\n parser.add_argument(\n \"--eager\",\n action=\"store_true\",\n help=\"Whether to attempt to enable TF eager execution.\")\n parser.add_argument(\n \"--trace\",\n action=\"store_true\",\n help=\"Whether to attempt to enable tracing for eager mode.\")\n parser.add_argument(\n \"--log-flatland-stats\",\n action=\"store_true\",\n default=True,\n help=\"Whether to log additional flatland specfic metrics such as percentage complete or normalized score.\")\n parser.add_argument(\n \"-e\",\n \"--eval\",\n action=\"store_true\",\n help=\"Whether to run evaluation. 
Default evaluation config is default.yaml \"\n \"to use custom evaluation config set (eval_generator:high_eval) under configs\")\n parser.add_argument(\n \"--bind-all\",\n action=\"store_true\",\n default=False,\n help=\"Whether to expose on network (binding on all network interfaces).\")\n parser.add_argument(\n \"--env\", default=None, type=str, help=\"The gym environment to use.\")\n parser.add_argument(\n \"--queue-trials\",\n action=\"store_true\",\n help=(\n \"Whether to queue trials when the cluster does not currently have \"\n \"enough resources to launch one. This should be set to True when \"\n \"running on an autoscaling cluster to enable automatic scale-up.\"))\n parser.add_argument(\n \"-f\",\n \"--config-file\",\n default=None,\n type=str,\n help=\"If specified, use config options from this file. Note that this \"\n \"overrides any trial-specific options set via flags above.\")\n return parser\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class article(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return self.title
def save(self, *args, **kwargs):
self.slug = slugify(self.title)
super(article, self).save(*args, **kwargs)
class Comment(models.Model):
post = models.ForeignKey(article, on_delete=models.CASCADE,
related_name='comments')
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.
CASCADE)
body = models.TextField()
date = models.DateTimeField(auto_now_add=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class topic(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class article(models.Model):
title = models.CharField(max_length=255)
slug = models.SlugField(max_length=255, unique=True, blank=True,
editable=True, null=True)
topic = models.ForeignKey(topic, on_delete=models.CASCADE)
author = models.CharField(max_length=255)
opening = models.TextField()
body = RichTextUploadingField()
date = models.DateTimeField(auto_now_add=True)
image = models.ImageField(null=True)
view = models.IntegerField(default=0, null=True)
def __str__(self):
return self.title
def save(self, *args, **kwargs):
self.slug = slugify(self.title)
super(article, self).save(*args, **kwargs)
class Comment(models.Model):
post = models.ForeignKey(article, on_delete=models.CASCADE,
related_name='comments')
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.
CASCADE)
body = models.TextField()
date = models.DateTimeField(auto_now_add=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class topic(models.Model):
name = models.CharField(max_length=255, primary_key=True)
showname = models.CharField(max_length=255, null=True)
def __str__(self):
return self.name
class article(models.Model):
title = models.CharField(max_length=255)
slug = models.SlugField(max_length=255, unique=True, blank=True,
editable=True, null=True)
topic = models.ForeignKey(topic, on_delete=models.CASCADE)
author = models.CharField(max_length=255)
opening = models.TextField()
body = RichTextUploadingField()
date = models.DateTimeField(auto_now_add=True)
image = models.ImageField(null=True)
view = models.IntegerField(default=0, null=True)
def __str__(self):
return self.title
def save(self, *args, **kwargs):
self.slug = slugify(self.title)
super(article, self).save(*args, **kwargs)
class Comment(models.Model):
post = models.ForeignKey(article, on_delete=models.CASCADE,
related_name='comments')
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.
CASCADE)
body = models.TextField()
date = models.DateTimeField(auto_now_add=True)
<|reserved_special_token_1|>
from django.db import models
from django.conf import settings
from django.utils.text import slugify
from six import python_2_unicode_compatible
from ckeditor_uploader.fields import RichTextUploadingField
from ckeditor.fields import RichTextField
class topic(models.Model):
name = models.CharField(max_length=255, primary_key=True)
showname = models.CharField(max_length=255, null=True)
def __str__(self):
return self.name
class article(models.Model):
title = models.CharField(max_length=255)
slug = models.SlugField(max_length=255, unique=True, blank=True,
editable=True, null=True)
topic = models.ForeignKey(topic, on_delete=models.CASCADE)
author = models.CharField(max_length=255)
opening = models.TextField()
body = RichTextUploadingField()
date = models.DateTimeField(auto_now_add=True)
image = models.ImageField(null=True)
view = models.IntegerField(default=0, null=True)
def __str__(self):
return self.title
def save(self, *args, **kwargs):
self.slug = slugify(self.title)
super(article, self).save(*args, **kwargs)
class Comment(models.Model):
post = models.ForeignKey(article, on_delete=models.CASCADE,
related_name='comments')
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.
CASCADE)
body = models.TextField()
date = models.DateTimeField(auto_now_add=True)
<|reserved_special_token_1|>
from django.db import models
from django.conf import settings
from django.utils.text import slugify
from six import python_2_unicode_compatible
from ckeditor_uploader.fields import RichTextUploadingField
from ckeditor.fields import RichTextField
# Create your models here.
class topic(models.Model):
name = models.CharField(max_length=255, primary_key=True)
showname = models.CharField(max_length=255, null= True)
def __str__(self):
return self.name
class article(models.Model):
title = models.CharField(max_length=255)
slug = models.SlugField(max_length=255, unique= True, blank=True, editable=True, null = True)
topic = models.ForeignKey(topic, on_delete=models.CASCADE)
author = models.CharField(max_length=255)
opening = models.TextField()
body = RichTextUploadingField()
date = models.DateTimeField(auto_now_add=True)
image = models.ImageField(null = True)
view = models.IntegerField(default=0, null=True)
def __str__(self):
return self.title
def save(self, *args, **kwargs):
self.slug = slugify(self.title)
super(article, self).save(*args, **kwargs)
class Comment(models.Model):
post = models.ForeignKey(article, on_delete=models.CASCADE, related_name='comments')
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
body = models.TextField()
date = models.DateTimeField(auto_now_add=True)
|
flexible
|
{
"blob_id": "31801f62942337b0cdf0e022dc75a9e125be54e3",
"index": 4191,
"step-1": "<mask token>\n\n\nclass article(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.title\n\n def save(self, *args, **kwargs):\n self.slug = slugify(self.title)\n super(article, self).save(*args, **kwargs)\n\n\nclass Comment(models.Model):\n post = models.ForeignKey(article, on_delete=models.CASCADE,\n related_name='comments')\n author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n CASCADE)\n body = models.TextField()\n date = models.DateTimeField(auto_now_add=True)\n",
"step-2": "<mask token>\n\n\nclass topic(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass article(models.Model):\n title = models.CharField(max_length=255)\n slug = models.SlugField(max_length=255, unique=True, blank=True,\n editable=True, null=True)\n topic = models.ForeignKey(topic, on_delete=models.CASCADE)\n author = models.CharField(max_length=255)\n opening = models.TextField()\n body = RichTextUploadingField()\n date = models.DateTimeField(auto_now_add=True)\n image = models.ImageField(null=True)\n view = models.IntegerField(default=0, null=True)\n\n def __str__(self):\n return self.title\n\n def save(self, *args, **kwargs):\n self.slug = slugify(self.title)\n super(article, self).save(*args, **kwargs)\n\n\nclass Comment(models.Model):\n post = models.ForeignKey(article, on_delete=models.CASCADE,\n related_name='comments')\n author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n CASCADE)\n body = models.TextField()\n date = models.DateTimeField(auto_now_add=True)\n",
"step-3": "<mask token>\n\n\nclass topic(models.Model):\n name = models.CharField(max_length=255, primary_key=True)\n showname = models.CharField(max_length=255, null=True)\n\n def __str__(self):\n return self.name\n\n\nclass article(models.Model):\n title = models.CharField(max_length=255)\n slug = models.SlugField(max_length=255, unique=True, blank=True,\n editable=True, null=True)\n topic = models.ForeignKey(topic, on_delete=models.CASCADE)\n author = models.CharField(max_length=255)\n opening = models.TextField()\n body = RichTextUploadingField()\n date = models.DateTimeField(auto_now_add=True)\n image = models.ImageField(null=True)\n view = models.IntegerField(default=0, null=True)\n\n def __str__(self):\n return self.title\n\n def save(self, *args, **kwargs):\n self.slug = slugify(self.title)\n super(article, self).save(*args, **kwargs)\n\n\nclass Comment(models.Model):\n post = models.ForeignKey(article, on_delete=models.CASCADE,\n related_name='comments')\n author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n CASCADE)\n body = models.TextField()\n date = models.DateTimeField(auto_now_add=True)\n",
"step-4": "from django.db import models\nfrom django.conf import settings\nfrom django.utils.text import slugify\nfrom six import python_2_unicode_compatible\nfrom ckeditor_uploader.fields import RichTextUploadingField\nfrom ckeditor.fields import RichTextField\n\n\nclass topic(models.Model):\n name = models.CharField(max_length=255, primary_key=True)\n showname = models.CharField(max_length=255, null=True)\n\n def __str__(self):\n return self.name\n\n\nclass article(models.Model):\n title = models.CharField(max_length=255)\n slug = models.SlugField(max_length=255, unique=True, blank=True,\n editable=True, null=True)\n topic = models.ForeignKey(topic, on_delete=models.CASCADE)\n author = models.CharField(max_length=255)\n opening = models.TextField()\n body = RichTextUploadingField()\n date = models.DateTimeField(auto_now_add=True)\n image = models.ImageField(null=True)\n view = models.IntegerField(default=0, null=True)\n\n def __str__(self):\n return self.title\n\n def save(self, *args, **kwargs):\n self.slug = slugify(self.title)\n super(article, self).save(*args, **kwargs)\n\n\nclass Comment(models.Model):\n post = models.ForeignKey(article, on_delete=models.CASCADE,\n related_name='comments')\n author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n CASCADE)\n body = models.TextField()\n date = models.DateTimeField(auto_now_add=True)\n",
"step-5": "from django.db import models\nfrom django.conf import settings\nfrom django.utils.text import slugify\nfrom six import python_2_unicode_compatible\nfrom ckeditor_uploader.fields import RichTextUploadingField\nfrom ckeditor.fields import RichTextField\n# Create your models here.\nclass topic(models.Model):\n name = models.CharField(max_length=255, primary_key=True)\n showname = models.CharField(max_length=255, null= True)\n\n def __str__(self):\n return self.name\n\nclass article(models.Model):\n title = models.CharField(max_length=255)\n slug = models.SlugField(max_length=255, unique= True, blank=True, editable=True, null = True)\n topic = models.ForeignKey(topic, on_delete=models.CASCADE)\n author = models.CharField(max_length=255)\n opening = models.TextField()\n body = RichTextUploadingField()\n date = models.DateTimeField(auto_now_add=True)\n image = models.ImageField(null = True)\n view = models.IntegerField(default=0, null=True)\n \n\n def __str__(self):\n return self.title\n\n def save(self, *args, **kwargs):\n self.slug = slugify(self.title)\n super(article, self).save(*args, **kwargs)\n \n\nclass Comment(models.Model):\n post = models.ForeignKey(article, on_delete=models.CASCADE, related_name='comments')\n author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n body = models.TextField()\n date = models.DateTimeField(auto_now_add=True)\n\n\n\n ",
"step-ids": [
5,
7,
9,
10,
11
]
}
|
[
5,
7,
9,
10,
11
] |
from django.db import models
from django.urls import reverse
from django.conf import settings
from embed_video.fields import EmbedVideoField
from django.contrib.auth.models import AbstractBaseUser
User = settings.AUTH_USER_MODEL
# Create your models here.
"""class User(models.Model):
username = models.CharField(max_length=20)
created_at = models.DateTimeField()
is_enabled = models.BooleanField(default=True)
email = models.EmailField()
password = models.CharField(max_length=20)
def __str__(self):
return self.username"""
class Post(models.Model):
is_enabled = models.BooleanField(default=True)
parent = models.ForeignKey(
'self', on_delete=models.PROTECT, blank=True, null=True, default=''
)
text = models.TextField()
created_at = models.DateTimeField(auto_now_add=True, blank=True)
author = models.ForeignKey(
User,
on_delete=models.PROTECT
)
class Meta:
ordering = ['parent_id', 'created_at']
def display_text(self):
short = " ".join(self.text.split()[0:5])
if len(short) > 20:
short = self.text[:20] + "..."
return short
display_text.short_description = 'Text'
def __str__(self):
space = " "
return f'{space.join(self.text.split()[0:5])} ({str(self.created_at)})'
def get_absolute_url(self):
return reverse('post-detail', args=[str(self.id)])
class Item(models.Model):
video = EmbedVideoField()
|
normal
|
{
"blob_id": "5c4a48de94cf5bfe67e6a74c33a317fa1da8d2fa",
"index": 7330,
"step-1": "<mask token>\n\n\nclass Post(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n ordering = ['parent_id', 'created_at']\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Item(models.Model):\n video = EmbedVideoField()\n",
"step-2": "<mask token>\n\n\nclass Post(models.Model):\n is_enabled = models.BooleanField(default=True)\n parent = models.ForeignKey('self', on_delete=models.PROTECT, blank=True,\n null=True, default='')\n text = models.TextField()\n created_at = models.DateTimeField(auto_now_add=True, blank=True)\n author = models.ForeignKey(User, on_delete=models.PROTECT)\n\n\n class Meta:\n ordering = ['parent_id', 'created_at']\n\n def display_text(self):\n short = ' '.join(self.text.split()[0:5])\n if len(short) > 20:\n short = self.text[:20] + '...'\n return short\n display_text.short_description = 'Text'\n\n def __str__(self):\n space = ' '\n return f'{space.join(self.text.split()[0:5])} ({str(self.created_at)})'\n\n def get_absolute_url(self):\n return reverse('post-detail', args=[str(self.id)])\n\n\nclass Item(models.Model):\n video = EmbedVideoField()\n",
"step-3": "<mask token>\nUser = settings.AUTH_USER_MODEL\n<mask token>\n\n\nclass Post(models.Model):\n is_enabled = models.BooleanField(default=True)\n parent = models.ForeignKey('self', on_delete=models.PROTECT, blank=True,\n null=True, default='')\n text = models.TextField()\n created_at = models.DateTimeField(auto_now_add=True, blank=True)\n author = models.ForeignKey(User, on_delete=models.PROTECT)\n\n\n class Meta:\n ordering = ['parent_id', 'created_at']\n\n def display_text(self):\n short = ' '.join(self.text.split()[0:5])\n if len(short) > 20:\n short = self.text[:20] + '...'\n return short\n display_text.short_description = 'Text'\n\n def __str__(self):\n space = ' '\n return f'{space.join(self.text.split()[0:5])} ({str(self.created_at)})'\n\n def get_absolute_url(self):\n return reverse('post-detail', args=[str(self.id)])\n\n\nclass Item(models.Model):\n video = EmbedVideoField()\n",
"step-4": "from django.db import models\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom embed_video.fields import EmbedVideoField\nfrom django.contrib.auth.models import AbstractBaseUser\nUser = settings.AUTH_USER_MODEL\n<mask token>\n\n\nclass Post(models.Model):\n is_enabled = models.BooleanField(default=True)\n parent = models.ForeignKey('self', on_delete=models.PROTECT, blank=True,\n null=True, default='')\n text = models.TextField()\n created_at = models.DateTimeField(auto_now_add=True, blank=True)\n author = models.ForeignKey(User, on_delete=models.PROTECT)\n\n\n class Meta:\n ordering = ['parent_id', 'created_at']\n\n def display_text(self):\n short = ' '.join(self.text.split()[0:5])\n if len(short) > 20:\n short = self.text[:20] + '...'\n return short\n display_text.short_description = 'Text'\n\n def __str__(self):\n space = ' '\n return f'{space.join(self.text.split()[0:5])} ({str(self.created_at)})'\n\n def get_absolute_url(self):\n return reverse('post-detail', args=[str(self.id)])\n\n\nclass Item(models.Model):\n video = EmbedVideoField()\n",
"step-5": "from django.db import models\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom embed_video.fields import EmbedVideoField\nfrom django.contrib.auth.models import AbstractBaseUser\n\nUser = settings.AUTH_USER_MODEL\n\n# Create your models here.\n\n\"\"\"class User(models.Model):\n username = models.CharField(max_length=20)\n created_at = models.DateTimeField()\n is_enabled = models.BooleanField(default=True)\n email = models.EmailField()\n password = models.CharField(max_length=20)\n\n def __str__(self):\n return self.username\"\"\"\n\n\nclass Post(models.Model):\n is_enabled = models.BooleanField(default=True)\n parent = models.ForeignKey(\n 'self', on_delete=models.PROTECT, blank=True, null=True, default=''\n )\n text = models.TextField()\n created_at = models.DateTimeField(auto_now_add=True, blank=True)\n author = models.ForeignKey(\n User,\n on_delete=models.PROTECT\n )\n\n class Meta:\n ordering = ['parent_id', 'created_at']\n\n def display_text(self):\n short = \" \".join(self.text.split()[0:5])\n if len(short) > 20:\n short = self.text[:20] + \"...\"\n return short\n\n display_text.short_description = 'Text'\n\n def __str__(self):\n space = \" \"\n return f'{space.join(self.text.split()[0:5])} ({str(self.created_at)})'\n\n def get_absolute_url(self):\n return reverse('post-detail', args=[str(self.id)])\n\n\nclass Item(models.Model):\n video = EmbedVideoField()\n",
"step-ids": [
3,
7,
8,
9,
10
]
}
|
[
3,
7,
8,
9,
10
] |
import pandas as pd
import tensorflow as tf
import autokeras as ak
import numpy as np
import matplotlib.pyplot as plt
from numpy import concatenate
from pandas import read_csv, DataFrame, concat
from sklearn.preprocessing import MinMaxScaler
np.set_printoptions(suppress=True)
EPOCHS = 10
BATCH_SIZE = 128
SHIFT_DAYS = 3
PRED_STEPS = 24*6 #48hr * prediction in 10-minute units
TIME_STEPS = SHIFT_DAYS*PRED_STEPS #hours step
DIMENSION = 15
MODEL_NUM = 10
CAPACITY = 89.7
TRAIN_RATIO = 0.6
VAL_RATIO = 0.2
START_DATE = '2021012899'
END_DATE = '2021042924'
SAVE_PATH = './data/'
SAVE_NAME = 'autoML_Test'
def getData():
# power
power_file = './data/power_20210129_20210429_preprocess_1hour'
power_df = read_csv(power_file+'.csv', encoding='CP949', converters={'date':int})
print(power_df.shape)
# sensor
sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'
sensor_df = read_csv(sensor_file+'.csv', encoding='CP949', converters={'date':int})
sensor_df = sensor_df.sort_values('date')
print(sensor_df.shape)
# scale
power_df.drop(['date'], axis=1, inplace=True)
pow_scaler = MinMaxScaler(feature_range = (0, 1))
scaled_pow = pow_scaler.fit_transform(power_df.values)
power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns, index=list(power_df.index.values))
weather_df = sensor_df.copy()
weather_df.drop(['date'], axis=1, inplace=True)
weather_scaler = MinMaxScaler(feature_range = (0, 1))#scale
scaled_weather = weather_scaler.fit_transform(weather_df.values)
weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.columns, index=list(weather_df.index.values))
# JOIN
df = weather_scaleddf.copy()
# pow + weather + powY
df.insert(0, 'pow', power_scaleddf.values, True)
#df = df.iloc[0:-TIME_STEPS, :]
#df.insert(df.shape[1], 'pow_Y', power_scaleddf.iloc[TIME_STEPS:, :].values, True)
#df.insert(df.shape[1], 'pow_Y', power_scaleddf.iloc[TIME_STEPS:, :].values, True)
#df.to_csv(SAVE_PATH+"total_scaled"+SAVE_NAME+".csv",mode='w',index=False, encoding='CP949')
#display(df)
return pow_scaler, df
pow_scaler, df = getData()
#display(df)
dataset = df
val_split = int(len(dataset) * 0.7)
data_train = dataset[:val_split]
validation_data = dataset[val_split:]
data_x = data_train[
[
'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',
'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',
'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',
'dewpoint', 'outside_status'
]
].astype("float64")
data_x_val = validation_data[
[
'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',
'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',
'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',
'dewpoint', 'outside_status'
]
].astype("float64")
# Data with train data and the unseen data from subsequent time steps.
data_x_test = dataset[
[
'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',
'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',
'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',
'dewpoint', 'outside_status'
]
].astype("float64")
data_y = data_train["pow"].astype("float64")
data_y_val = validation_data["pow"].astype("float64")
print(data_x.shape) # (6549, 12)
print(data_y.shape) # (6549,)
predict_from = 1
predict_until = 10
lookback = 3
clf = ak.TimeseriesForecaster(
lookback=lookback,
predict_from=predict_from,
#predict_until=predict_until,
#max_trials=1,
objective="val_loss",
)
# Train the TimeSeriesForecaster with train data
clf.fit(
x=data_x,
y=data_y,
validation_data=(data_x_val, data_y_val),
batch_size=128,
epochs=10,
)
# Predict with the best model(includes original training data).
predictions = clf.predict(data_x_test)
print(predictions.shape)
# Evaluate the best model with the validation data.
print(clf.evaluate(data_x_val, data_y_val))
|
normal
|
{
"blob_id": "013189cd67cc44efd539c75ed235a0753d95f54e",
"index": 2165,
"step-1": "<mask token>\n\n\ndef getData():\n power_file = './data/power_20210129_20210429_preprocess_1hour'\n power_df = read_csv(power_file + '.csv', encoding='CP949', converters={\n 'date': int})\n print(power_df.shape)\n sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'\n sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters\n ={'date': int})\n sensor_df = sensor_df.sort_values('date')\n print(sensor_df.shape)\n power_df.drop(['date'], axis=1, inplace=True)\n pow_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_pow = pow_scaler.fit_transform(power_df.values)\n power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,\n index=list(power_df.index.values))\n weather_df = sensor_df.copy()\n weather_df.drop(['date'], axis=1, inplace=True)\n weather_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_weather = weather_scaler.fit_transform(weather_df.values)\n weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.\n columns, index=list(weather_df.index.values))\n df = weather_scaleddf.copy()\n df.insert(0, 'pow', power_scaleddf.values, True)\n return pow_scaler, df\n\n\n<mask token>\n",
"step-2": "<mask token>\nnp.set_printoptions(suppress=True)\n<mask token>\n\n\ndef getData():\n power_file = './data/power_20210129_20210429_preprocess_1hour'\n power_df = read_csv(power_file + '.csv', encoding='CP949', converters={\n 'date': int})\n print(power_df.shape)\n sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'\n sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters\n ={'date': int})\n sensor_df = sensor_df.sort_values('date')\n print(sensor_df.shape)\n power_df.drop(['date'], axis=1, inplace=True)\n pow_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_pow = pow_scaler.fit_transform(power_df.values)\n power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,\n index=list(power_df.index.values))\n weather_df = sensor_df.copy()\n weather_df.drop(['date'], axis=1, inplace=True)\n weather_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_weather = weather_scaler.fit_transform(weather_df.values)\n weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.\n columns, index=list(weather_df.index.values))\n df = weather_scaleddf.copy()\n df.insert(0, 'pow', power_scaleddf.values, True)\n return pow_scaler, df\n\n\n<mask token>\nprint(data_x.shape)\nprint(data_y.shape)\n<mask token>\nclf.fit(x=data_x, y=data_y, validation_data=(data_x_val, data_y_val),\n batch_size=128, epochs=10)\n<mask token>\nprint(predictions.shape)\nprint(clf.evaluate(data_x_val, data_y_val))\n",
"step-3": "<mask token>\nnp.set_printoptions(suppress=True)\nEPOCHS = 10\nBATCH_SIZE = 128\nSHIFT_DAYS = 3\nPRED_STEPS = 24 * 6\nTIME_STEPS = SHIFT_DAYS * PRED_STEPS\nDIMENSION = 15\nMODEL_NUM = 10\nCAPACITY = 89.7\nTRAIN_RATIO = 0.6\nVAL_RATIO = 0.2\nSTART_DATE = '2021012899'\nEND_DATE = '2021042924'\nSAVE_PATH = './data/'\nSAVE_NAME = 'autoML_Test'\n\n\ndef getData():\n power_file = './data/power_20210129_20210429_preprocess_1hour'\n power_df = read_csv(power_file + '.csv', encoding='CP949', converters={\n 'date': int})\n print(power_df.shape)\n sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'\n sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters\n ={'date': int})\n sensor_df = sensor_df.sort_values('date')\n print(sensor_df.shape)\n power_df.drop(['date'], axis=1, inplace=True)\n pow_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_pow = pow_scaler.fit_transform(power_df.values)\n power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,\n index=list(power_df.index.values))\n weather_df = sensor_df.copy()\n weather_df.drop(['date'], axis=1, inplace=True)\n weather_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_weather = weather_scaler.fit_transform(weather_df.values)\n weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.\n columns, index=list(weather_df.index.values))\n df = weather_scaleddf.copy()\n df.insert(0, 'pow', power_scaleddf.values, True)\n return pow_scaler, df\n\n\npow_scaler, df = getData()\ndataset = df\nval_split = int(len(dataset) * 0.7)\ndata_train = dataset[:val_split]\nvalidation_data = dataset[val_split:]\ndata_x = data_train[['pow', 'temp', 'humidity', 'windspeed', 'windgust',\n 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',\n 'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',\n 'feelslike', 'dewpoint', 'outside_status']].astype('float64')\ndata_x_val = validation_data[['pow', 'temp', 'humidity', 'windspeed',\n 'windgust', 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',\n 'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',\n 'feelslike', 'dewpoint', 'outside_status']].astype('float64')\ndata_x_test = dataset[['pow', 'temp', 'humidity', 'windspeed', 'windgust',\n 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',\n 'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',\n 'feelslike', 'dewpoint', 'outside_status']].astype('float64')\ndata_y = data_train['pow'].astype('float64')\ndata_y_val = validation_data['pow'].astype('float64')\nprint(data_x.shape)\nprint(data_y.shape)\npredict_from = 1\npredict_until = 10\nlookback = 3\nclf = ak.TimeseriesForecaster(lookback=lookback, predict_from=predict_from,\n objective='val_loss')\nclf.fit(x=data_x, y=data_y, validation_data=(data_x_val, data_y_val),\n batch_size=128, epochs=10)\npredictions = clf.predict(data_x_test)\nprint(predictions.shape)\nprint(clf.evaluate(data_x_val, data_y_val))\n",
"step-4": "import pandas as pd\nimport tensorflow as tf\nimport autokeras as ak\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport tensorflow as tf\nfrom numpy import concatenate\nfrom pandas import read_csv, DataFrame, concat\nfrom sklearn.preprocessing import MinMaxScaler\nnp.set_printoptions(suppress=True)\nEPOCHS = 10\nBATCH_SIZE = 128\nSHIFT_DAYS = 3\nPRED_STEPS = 24 * 6\nTIME_STEPS = SHIFT_DAYS * PRED_STEPS\nDIMENSION = 15\nMODEL_NUM = 10\nCAPACITY = 89.7\nTRAIN_RATIO = 0.6\nVAL_RATIO = 0.2\nSTART_DATE = '2021012899'\nEND_DATE = '2021042924'\nSAVE_PATH = './data/'\nSAVE_NAME = 'autoML_Test'\n\n\ndef getData():\n power_file = './data/power_20210129_20210429_preprocess_1hour'\n power_df = read_csv(power_file + '.csv', encoding='CP949', converters={\n 'date': int})\n print(power_df.shape)\n sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'\n sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters\n ={'date': int})\n sensor_df = sensor_df.sort_values('date')\n print(sensor_df.shape)\n power_df.drop(['date'], axis=1, inplace=True)\n pow_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_pow = pow_scaler.fit_transform(power_df.values)\n power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,\n index=list(power_df.index.values))\n weather_df = sensor_df.copy()\n weather_df.drop(['date'], axis=1, inplace=True)\n weather_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_weather = weather_scaler.fit_transform(weather_df.values)\n weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.\n columns, index=list(weather_df.index.values))\n df = weather_scaleddf.copy()\n df.insert(0, 'pow', power_scaleddf.values, True)\n return pow_scaler, df\n\n\npow_scaler, df = getData()\ndataset = df\nval_split = int(len(dataset) * 0.7)\ndata_train = dataset[:val_split]\nvalidation_data = dataset[val_split:]\ndata_x = data_train[['pow', 'temp', 'humidity', 'windspeed', 'windgust',\n 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',\n 'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',\n 'feelslike', 'dewpoint', 'outside_status']].astype('float64')\ndata_x_val = validation_data[['pow', 'temp', 'humidity', 'windspeed',\n 'windgust', 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',\n 'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',\n 'feelslike', 'dewpoint', 'outside_status']].astype('float64')\ndata_x_test = dataset[['pow', 'temp', 'humidity', 'windspeed', 'windgust',\n 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',\n 'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',\n 'feelslike', 'dewpoint', 'outside_status']].astype('float64')\ndata_y = data_train['pow'].astype('float64')\ndata_y_val = validation_data['pow'].astype('float64')\nprint(data_x.shape)\nprint(data_y.shape)\npredict_from = 1\npredict_until = 10\nlookback = 3\nclf = ak.TimeseriesForecaster(lookback=lookback, predict_from=predict_from,\n objective='val_loss')\nclf.fit(x=data_x, y=data_y, validation_data=(data_x_val, data_y_val),\n batch_size=128, epochs=10)\npredictions = clf.predict(data_x_test)\nprint(predictions.shape)\nprint(clf.evaluate(data_x_val, data_y_val))\n",
"step-5": "import pandas as pd\r\nimport tensorflow as tf\r\nimport autokeras as ak\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport tensorflow as tf\r\n\r\nfrom numpy import concatenate\r\nfrom pandas import read_csv, DataFrame, concat\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\nnp.set_printoptions(suppress=True)\r\n\r\nEPOCHS = 10\r\nBATCH_SIZE = 128\r\n\r\nSHIFT_DAYS = 3\r\nPRED_STEPS = 24*6 #48hr * 10분단위 예측\r\nTIME_STEPS = SHIFT_DAYS*PRED_STEPS #hours step\r\nDIMENSION = 15\r\nMODEL_NUM = 10\r\nCAPACITY = 89.7\r\n\r\nTRAIN_RATIO = 0.6\r\nVAL_RATIO = 0.2\r\n\r\nSTART_DATE = '2021012899'\r\nEND_DATE = '2021042924'\r\n\r\nSAVE_PATH = './data/'\r\nSAVE_NAME = 'autoML_Test'\r\n\r\n\r\ndef getData():\r\n # power\r\n power_file = './data/power_20210129_20210429_preprocess_1hour'\r\n power_df = read_csv(power_file+'.csv', encoding='CP949', converters={'date':int})\r\n print(power_df.shape)\r\n \r\n # sensor \r\n sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'\r\n sensor_df = read_csv(sensor_file+'.csv', encoding='CP949', converters={'date':int})\r\n sensor_df = sensor_df.sort_values('date')\r\n print(sensor_df.shape)\r\n\r\n # scale\r\n power_df.drop(['date'], axis=1, inplace=True)\r\n pow_scaler = MinMaxScaler(feature_range = (0, 1))\r\n scaled_pow = pow_scaler.fit_transform(power_df.values)\r\n power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns, index=list(power_df.index.values))\r\n\r\n weather_df = sensor_df.copy()\r\n weather_df.drop(['date'], axis=1, inplace=True)\r\n weather_scaler = MinMaxScaler(feature_range = (0, 1))#scale\r\n scaled_weather = weather_scaler.fit_transform(weather_df.values)\r\n weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.columns, index=list(weather_df.index.values))\r\n\r\n # JOIN \r\n df = weather_scaleddf.copy()\r\n\r\n # pow + weather + powY\r\n df.insert(0, 'pow', power_scaleddf.values, True)\r\n #df = df.iloc[0:-TIME_STEPS, :]\r\n #df.insert(df.shape[1], 'pow_Y', power_scaleddf.iloc[TIME_STEPS:, :].values, True)\r\n #df.insert(df.shape[1], 'pow_Y', power_scaleddf.iloc[TIME_STEPS:, :].values, True)\r\n\r\n #df.to_csv(SAVE_PATH+\"total_scaled\"+SAVE_NAME+\".csv\",mode='w',index=False, encoding='CP949')\r\n #display(df) \r\n\r\n return pow_scaler, df\r\n\r\npow_scaler, df = getData()\r\n#display(df)\r\n\r\ndataset = df\r\nval_split = int(len(dataset) * 0.7)\r\ndata_train = dataset[:val_split]\r\nvalidation_data = dataset[val_split:]\r\n\r\ndata_x = data_train[\r\n [\r\n 'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',\r\n 'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',\r\n 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',\r\n 'dewpoint', 'outside_status'\r\n ]\r\n].astype(\"float64\")\r\n\r\ndata_x_val = validation_data[\r\n [\r\n 'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',\r\n 'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',\r\n 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',\r\n 'dewpoint', 'outside_status'\r\n ]\r\n].astype(\"float64\")\r\n\r\n# Data with train data and the unseen data from subsequent time steps.\r\ndata_x_test = dataset[\r\n [\r\n 'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',\r\n 'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',\r\n 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',\r\n 'dewpoint', 'outside_status'\r\n ]\r\n].astype(\"float64\")\r\n\r\ndata_y = 
data_train[\"pow\"].astype(\"float64\")\r\n\r\ndata_y_val = validation_data[\"pow\"].astype(\"float64\")\r\n\r\nprint(data_x.shape) # (6549, 12)\r\nprint(data_y.shape) # (6549,)\r\n\r\npredict_from = 1\r\npredict_until = 10\r\nlookback = 3\r\nclf = ak.TimeseriesForecaster(\r\n lookback=lookback,\r\n predict_from=predict_from,\r\n #predict_until=predict_until,\r\n #max_trials=1,\r\n objective=\"val_loss\",\r\n)\r\n# Train the TimeSeriesForecaster with train data\r\nclf.fit(\r\n x=data_x,\r\n y=data_y,\r\n validation_data=(data_x_val, data_y_val),\r\n batch_size=128,\r\n epochs=10,\r\n)\r\n# Predict with the best model(includes original training data).\r\npredictions = clf.predict(data_x_test)\r\nprint(predictions.shape)\r\n# Evaluate the best model with testing data.\r\nprint(clf.evaluate(data_x_val, data_y_val))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from setuptools import setup, find_packages
setup(
name='testspace-python',
version='',
packages=find_packages(include=['testspace', 'testspace.*']),
url='',
license="MIT license",
author="Jeffrey Schultz",
author_email='[email protected]',
description="Module for interacting with Testspace Server",
install_requires=[
'requests',
]
)
|
normal
|
{
"blob_id": "7bc2a02d85c3b1a2b7ed61dc7567d1097b63d658",
"index": 3559,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='testspace-python', version='', packages=find_packages(include=[\n 'testspace', 'testspace.*']), url='', license='MIT license', author=\n 'Jeffrey Schultz', author_email='[email protected]', description\n ='Module for interacting with Testspace Server', install_requires=[\n 'requests'])\n",
"step-3": "from setuptools import setup, find_packages\nsetup(name='testspace-python', version='', packages=find_packages(include=[\n 'testspace', 'testspace.*']), url='', license='MIT license', author=\n 'Jeffrey Schultz', author_email='[email protected]', description\n ='Module for interacting with Testspace Server', install_requires=[\n 'requests'])\n",
"step-4": "from setuptools import setup, find_packages\n\nsetup(\n name='testspace-python',\n version='',\n packages=find_packages(include=['testspace', 'testspace.*']),\n url='',\n license=\"MIT license\",\n author=\"Jeffrey Schultz\",\n author_email='[email protected]',\n description=\"Module for interacting with Testspace Server\",\n install_requires=[\n 'requests',\n ]\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.apps import AppConfig
class ProjectrolesConfig(AppConfig):
name = 'projectroles'
|
normal
|
{
"blob_id": "6a4585e0e2f5ebbd0f9a7fa203f76bb88ff9c2a0",
"index": 2920,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ProjectrolesConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ProjectrolesConfig(AppConfig):\n name = 'projectroles'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass ProjectrolesConfig(AppConfig):\n name = 'projectroles'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def str_or_none(v):
if v is None:
return None
if v.lower() == 'none':
return None
else:
return v
<|reserved_special_token_0|>
def name2dic(s):
return {x.split('-')[0]: x.split('-')[1] for x in s.split('_')}
<|reserved_special_token_0|>
def load_tmp_df(load_path, tmp_path, name, table=False):
start = time.time()
pkl_file = join(tmp_path, '{}.pkl'.format(name))
if os.path.exists(pkl_file):
print('{} pickle file found, loading...'.format(pkl_file))
df = pd.read_pickle(pkl_file)
else:
print('{} pickle file not found, creating...'.format(pkl_file))
df = pd.read_csv(join(load_path, '{}.csv'.format(name)))
df = df_index_gen(df, table)
df.to_pickle(pkl_file)
print('{} Load complete. Time {}'.format(name, time.time() - start))
return df
def logSumExpTensor(vec):
batch_size = vec.size()[0]
vec = vec.view(batch_size, -1)
max_score = torch.max(vec, 1)[0]
max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])
return max_score + torch.log(torch.sum(torch.exp(vec -
max_score_broadcast), 1))
<|reserved_special_token_0|>
def logNormalize(a):
denom = np.logaddexp.reduce(a, 1)
return (a.transpose() - denom).transpose()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def str_or_none(v):
if v is None:
return None
if v.lower() == 'none':
return None
else:
return v
def dic2name(dic):
return '_'.join(['{}-{}'.format(k, dic[k]) for k in sorted(dic)])
def name2dic(s):
return {x.split('-')[0]: x.split('-')[1] for x in s.split('_')}
<|reserved_special_token_0|>
def df_index_gen(f, table=False):
f.loc[:, 'table_id'] = f.apply(lambda x: '+'.join([x['locator'], x[
'dataset_id']]), axis=1)
if not table:
f.loc[:, 'field_id'] = f.apply(lambda x: x['field_id'].split(':')[-
1], axis=1)
f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')
return f
def load_tmp_df(load_path, tmp_path, name, table=False):
start = time.time()
pkl_file = join(tmp_path, '{}.pkl'.format(name))
if os.path.exists(pkl_file):
print('{} pickle file found, loading...'.format(pkl_file))
df = pd.read_pickle(pkl_file)
else:
print('{} pickle file not found, creating...'.format(pkl_file))
df = pd.read_csv(join(load_path, '{}.csv'.format(name)))
df = df_index_gen(df, table)
df.to_pickle(pkl_file)
print('{} Load complete. Time {}'.format(name, time.time() - start))
return df
def logSumExpTensor(vec):
batch_size = vec.size()[0]
vec = vec.view(batch_size, -1)
max_score = torch.max(vec, 1)[0]
max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])
return max_score + torch.log(torch.sum(torch.exp(vec -
max_score_broadcast), 1))
<|reserved_special_token_0|>
def logNormalize(a):
denom = np.logaddexp.reduce(a, 1)
return (a.transpose() - denom).transpose()
def logDot(a, b):
max_a = np.amax(a)
max_b = np.amax(b)
C = np.dot(np.exp(a - max_a), np.exp(b - max_b))
np.log(C, out=C)
C += max_a + max_b
return C
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def str_or_none(v):
if v is None:
return None
if v.lower() == 'none':
return None
else:
return v
def dic2name(dic):
return '_'.join(['{}-{}'.format(k, dic[k]) for k in sorted(dic)])
def name2dic(s):
return {x.split('-')[0]: x.split('-')[1] for x in s.split('_')}
<|reserved_special_token_0|>
def df_index_gen(f, table=False):
f.loc[:, 'table_id'] = f.apply(lambda x: '+'.join([x['locator'], x[
'dataset_id']]), axis=1)
if not table:
f.loc[:, 'field_id'] = f.apply(lambda x: x['field_id'].split(':')[-
1], axis=1)
f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')
return f
def load_tmp_df(load_path, tmp_path, name, table=False):
start = time.time()
pkl_file = join(tmp_path, '{}.pkl'.format(name))
if os.path.exists(pkl_file):
print('{} pickle file found, loading...'.format(pkl_file))
df = pd.read_pickle(pkl_file)
else:
print('{} pickle file not found, creating...'.format(pkl_file))
df = pd.read_csv(join(load_path, '{}.csv'.format(name)))
df = df_index_gen(df, table)
df.to_pickle(pkl_file)
print('{} Load complete. Time {}'.format(name, time.time() - start))
return df
def logSumExpTensor(vec):
batch_size = vec.size()[0]
vec = vec.view(batch_size, -1)
max_score = torch.max(vec, 1)[0]
max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])
return max_score + torch.log(torch.sum(torch.exp(vec -
max_score_broadcast), 1))
def logNormalizeTensor(a):
denom = logSumExpTensor(a)
if len(a.size()) == 2:
denom = denom.view(-1, 1).expand(-1, a.size()[1])
elif len(a.size()) == 3:
denom = denom.view(a.size()[0], 1, 1).expand(-1, a.size()[1], a.
size()[2])
return a - denom
def logNormalize(a):
denom = np.logaddexp.reduce(a, 1)
return (a.transpose() - denom).transpose()
def logDot(a, b):
max_a = np.amax(a)
max_b = np.amax(b)
C = np.dot(np.exp(a - max_a), np.exp(b - max_b))
np.log(C, out=C)
C += max_a + max_b
return C
<|reserved_special_token_1|>
import os
from os.path import join
import json
import pandas as pd
import time
import numpy as np
import torch
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def str_or_none(v):
if v is None:
return None
if v.lower() == 'none':
return None
else:
return v
def dic2name(dic):
return '_'.join(['{}-{}'.format(k, dic[k]) for k in sorted(dic)])
def name2dic(s):
return {x.split('-')[0]: x.split('-')[1] for x in s.split('_')}
def get_valid_types(TYPENAME):
with open(join(os.environ['BASEPATH'], 'configs', 'types.json'), 'r'
) as typefile:
valid_types = json.load(typefile)[TYPENAME]
return valid_types
def df_index_gen(f, table=False):
f.loc[:, 'table_id'] = f.apply(lambda x: '+'.join([x['locator'], x[
'dataset_id']]), axis=1)
if not table:
f.loc[:, 'field_id'] = f.apply(lambda x: x['field_id'].split(':')[-
1], axis=1)
f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')
return f
def load_tmp_df(load_path, tmp_path, name, table=False):
start = time.time()
pkl_file = join(tmp_path, '{}.pkl'.format(name))
if os.path.exists(pkl_file):
print('{} pickle file found, loading...'.format(pkl_file))
df = pd.read_pickle(pkl_file)
else:
print('{} pickle file not found, creating...'.format(pkl_file))
df = pd.read_csv(join(load_path, '{}.csv'.format(name)))
df = df_index_gen(df, table)
df.to_pickle(pkl_file)
print('{} Load complete. Time {}'.format(name, time.time() - start))
return df
def logSumExpTensor(vec):
batch_size = vec.size()[0]
vec = vec.view(batch_size, -1)
max_score = torch.max(vec, 1)[0]
max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])
return max_score + torch.log(torch.sum(torch.exp(vec -
max_score_broadcast), 1))
def logNormalizeTensor(a):
denom = logSumExpTensor(a)
if len(a.size()) == 2:
denom = denom.view(-1, 1).expand(-1, a.size()[1])
elif len(a.size()) == 3:
denom = denom.view(a.size()[0], 1, 1).expand(-1, a.size()[1], a.
size()[2])
return a - denom
def logNormalize(a):
denom = np.logaddexp.reduce(a, 1)
return (a.transpose() - denom).transpose()
def logDot(a, b):
max_a = np.amax(a)
max_b = np.amax(b)
C = np.dot(np.exp(a - max_a), np.exp(b - max_b))
np.log(C, out=C)
C += max_a + max_b
return C
<|reserved_special_token_1|>
import os
from os.path import join
import json
import pandas as pd
import time
import numpy as np
import torch
def str2bool(v):
# convert string to boolean type for argparser input
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def str_or_none(v):
    # convert the string 'none' (or None) to None for argparser input
if v is None:
return None
if v.lower() == 'none':
return None
else:
return v
# helper functions for LDA arguments
def dic2name(dic):
return '_'.join(["{}-{}".format(k, dic[k]) for k in sorted(dic)])
def name2dic(s):
return {x.split('-')[0]:x.split('-')[1] for x in s.split('_')}
def get_valid_types(TYPENAME):
with open(join(os.environ['BASEPATH'], 'configs', 'types.json'), 'r') as typefile:
valid_types = json.load(typefile)[TYPENAME]
return valid_types
def df_index_gen(f, table=False):
    # merge locator and dataset_id to generate index table_id
f.loc[:,'table_id'] = f.apply(lambda x: '+'.join([x['locator'], x['dataset_id']]), axis = 1)
if not table:
f.loc[:,'field_id'] = f.apply(lambda x: x['field_id'].split(":")[-1], axis = 1)
f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')
return f
# load dataframe from pickle or create pickle file
def load_tmp_df(load_path, tmp_path, name, table=False):
start = time.time()
pkl_file = join(tmp_path, "{}.pkl".format(name))
if os.path.exists(pkl_file):
print("{} pickle file found, loading...".format(pkl_file))
df = pd.read_pickle(pkl_file)
else:
#process and save pkl
print("{} pickle file not found, creating...".format(pkl_file))
df = pd.read_csv(join(load_path, "{}.csv".format(name)))
df = df_index_gen(df, table)
df.to_pickle(pkl_file)
print("{} Load complete. Time {}".format(name, time.time()-start))
return df
def logSumExpTensor(vec):
# vec -> 16, tag_size
batch_size = vec.size()[0]
vec = vec.view(batch_size, -1)
max_score = torch.max(vec, 1)[0]
max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])
return max_score + \
torch.log(torch.sum(torch.exp(vec - max_score_broadcast), 1))
def logNormalizeTensor(a):
denom = logSumExpTensor(a)
if len(a.size())==2:
denom = denom.view(-1, 1).expand(-1, a.size()[1])
elif len(a.size())==3:
denom = denom.view(a.size()[0], 1, 1).expand(-1, a.size()[1], a.size()[2])
return (a-denom)
def logNormalize(a):
denom = np.logaddexp.reduce(a, 1)
return (a.transpose()- denom).transpose()
def logDot(a, b):
# numeric stable way of calculating log (e^a, e^b)
max_a = np.amax(a)
max_b = np.amax(b)
C = np.dot(np.exp(a - max_a), np.exp(b - max_b))
np.log(C, out=C)
# else:
# np.log(C + 1e-300, out=C)
C += max_a + max_b
return C
|
flexible
|
{
"blob_id": "a9302dbf724f9548411fbf2959f36b4cc5742ff8",
"index": 4999,
"step-1": "<mask token>\n\n\ndef str_or_none(v):\n if v is None:\n return None\n if v.lower() == 'none':\n return None\n else:\n return v\n\n\n<mask token>\n\n\ndef name2dic(s):\n return {x.split('-')[0]: x.split('-')[1] for x in s.split('_')}\n\n\n<mask token>\n\n\ndef load_tmp_df(load_path, tmp_path, name, table=False):\n start = time.time()\n pkl_file = join(tmp_path, '{}.pkl'.format(name))\n if os.path.exists(pkl_file):\n print('{} pickle file found, loading...'.format(pkl_file))\n df = pd.read_pickle(pkl_file)\n else:\n print('{} pickle file not found, creating...'.format(pkl_file))\n df = pd.read_csv(join(load_path, '{}.csv'.format(name)))\n df = df_index_gen(df, table)\n df.to_pickle(pkl_file)\n print('{} Load complete. Time {}'.format(name, time.time() - start))\n return df\n\n\ndef logSumExpTensor(vec):\n batch_size = vec.size()[0]\n vec = vec.view(batch_size, -1)\n max_score = torch.max(vec, 1)[0]\n max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])\n return max_score + torch.log(torch.sum(torch.exp(vec -\n max_score_broadcast), 1))\n\n\n<mask token>\n\n\ndef logNormalize(a):\n denom = np.logaddexp.reduce(a, 1)\n return (a.transpose() - denom).transpose()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef str_or_none(v):\n if v is None:\n return None\n if v.lower() == 'none':\n return None\n else:\n return v\n\n\ndef dic2name(dic):\n return '_'.join(['{}-{}'.format(k, dic[k]) for k in sorted(dic)])\n\n\ndef name2dic(s):\n return {x.split('-')[0]: x.split('-')[1] for x in s.split('_')}\n\n\n<mask token>\n\n\ndef df_index_gen(f, table=False):\n f.loc[:, 'table_id'] = f.apply(lambda x: '+'.join([x['locator'], x[\n 'dataset_id']]), axis=1)\n if not table:\n f.loc[:, 'field_id'] = f.apply(lambda x: x['field_id'].split(':')[-\n 1], axis=1)\n f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')\n return f\n\n\ndef load_tmp_df(load_path, tmp_path, name, table=False):\n start = time.time()\n pkl_file = join(tmp_path, '{}.pkl'.format(name))\n if os.path.exists(pkl_file):\n print('{} pickle file found, loading...'.format(pkl_file))\n df = pd.read_pickle(pkl_file)\n else:\n print('{} pickle file not found, creating...'.format(pkl_file))\n df = pd.read_csv(join(load_path, '{}.csv'.format(name)))\n df = df_index_gen(df, table)\n df.to_pickle(pkl_file)\n print('{} Load complete. Time {}'.format(name, time.time() - start))\n return df\n\n\ndef logSumExpTensor(vec):\n batch_size = vec.size()[0]\n vec = vec.view(batch_size, -1)\n max_score = torch.max(vec, 1)[0]\n max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])\n return max_score + torch.log(torch.sum(torch.exp(vec -\n max_score_broadcast), 1))\n\n\n<mask token>\n\n\ndef logNormalize(a):\n denom = np.logaddexp.reduce(a, 1)\n return (a.transpose() - denom).transpose()\n\n\ndef logDot(a, b):\n max_a = np.amax(a)\n max_b = np.amax(b)\n C = np.dot(np.exp(a - max_a), np.exp(b - max_b))\n np.log(C, out=C)\n C += max_a + max_b\n return C\n",
"step-3": "<mask token>\n\n\ndef str_or_none(v):\n if v is None:\n return None\n if v.lower() == 'none':\n return None\n else:\n return v\n\n\ndef dic2name(dic):\n return '_'.join(['{}-{}'.format(k, dic[k]) for k in sorted(dic)])\n\n\ndef name2dic(s):\n return {x.split('-')[0]: x.split('-')[1] for x in s.split('_')}\n\n\n<mask token>\n\n\ndef df_index_gen(f, table=False):\n f.loc[:, 'table_id'] = f.apply(lambda x: '+'.join([x['locator'], x[\n 'dataset_id']]), axis=1)\n if not table:\n f.loc[:, 'field_id'] = f.apply(lambda x: x['field_id'].split(':')[-\n 1], axis=1)\n f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')\n return f\n\n\ndef load_tmp_df(load_path, tmp_path, name, table=False):\n start = time.time()\n pkl_file = join(tmp_path, '{}.pkl'.format(name))\n if os.path.exists(pkl_file):\n print('{} pickle file found, loading...'.format(pkl_file))\n df = pd.read_pickle(pkl_file)\n else:\n print('{} pickle file not found, creating...'.format(pkl_file))\n df = pd.read_csv(join(load_path, '{}.csv'.format(name)))\n df = df_index_gen(df, table)\n df.to_pickle(pkl_file)\n print('{} Load complete. Time {}'.format(name, time.time() - start))\n return df\n\n\ndef logSumExpTensor(vec):\n batch_size = vec.size()[0]\n vec = vec.view(batch_size, -1)\n max_score = torch.max(vec, 1)[0]\n max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])\n return max_score + torch.log(torch.sum(torch.exp(vec -\n max_score_broadcast), 1))\n\n\ndef logNormalizeTensor(a):\n denom = logSumExpTensor(a)\n if len(a.size()) == 2:\n denom = denom.view(-1, 1).expand(-1, a.size()[1])\n elif len(a.size()) == 3:\n denom = denom.view(a.size()[0], 1, 1).expand(-1, a.size()[1], a.\n size()[2])\n return a - denom\n\n\ndef logNormalize(a):\n denom = np.logaddexp.reduce(a, 1)\n return (a.transpose() - denom).transpose()\n\n\ndef logDot(a, b):\n max_a = np.amax(a)\n max_b = np.amax(b)\n C = np.dot(np.exp(a - max_a), np.exp(b - max_b))\n np.log(C, out=C)\n C += max_a + max_b\n return C\n",
"step-4": "import os\nfrom os.path import join\nimport json\nimport pandas as pd\nimport time\nimport numpy as np\nimport torch\n\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef str_or_none(v):\n if v is None:\n return None\n if v.lower() == 'none':\n return None\n else:\n return v\n\n\ndef dic2name(dic):\n return '_'.join(['{}-{}'.format(k, dic[k]) for k in sorted(dic)])\n\n\ndef name2dic(s):\n return {x.split('-')[0]: x.split('-')[1] for x in s.split('_')}\n\n\ndef get_valid_types(TYPENAME):\n with open(join(os.environ['BASEPATH'], 'configs', 'types.json'), 'r'\n ) as typefile:\n valid_types = json.load(typefile)[TYPENAME]\n return valid_types\n\n\ndef df_index_gen(f, table=False):\n f.loc[:, 'table_id'] = f.apply(lambda x: '+'.join([x['locator'], x[\n 'dataset_id']]), axis=1)\n if not table:\n f.loc[:, 'field_id'] = f.apply(lambda x: x['field_id'].split(':')[-\n 1], axis=1)\n f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')\n return f\n\n\ndef load_tmp_df(load_path, tmp_path, name, table=False):\n start = time.time()\n pkl_file = join(tmp_path, '{}.pkl'.format(name))\n if os.path.exists(pkl_file):\n print('{} pickle file found, loading...'.format(pkl_file))\n df = pd.read_pickle(pkl_file)\n else:\n print('{} pickle file not found, creating...'.format(pkl_file))\n df = pd.read_csv(join(load_path, '{}.csv'.format(name)))\n df = df_index_gen(df, table)\n df.to_pickle(pkl_file)\n print('{} Load complete. Time {}'.format(name, time.time() - start))\n return df\n\n\ndef logSumExpTensor(vec):\n batch_size = vec.size()[0]\n vec = vec.view(batch_size, -1)\n max_score = torch.max(vec, 1)[0]\n max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])\n return max_score + torch.log(torch.sum(torch.exp(vec -\n max_score_broadcast), 1))\n\n\ndef logNormalizeTensor(a):\n denom = logSumExpTensor(a)\n if len(a.size()) == 2:\n denom = denom.view(-1, 1).expand(-1, a.size()[1])\n elif len(a.size()) == 3:\n denom = denom.view(a.size()[0], 1, 1).expand(-1, a.size()[1], a.\n size()[2])\n return a - denom\n\n\ndef logNormalize(a):\n denom = np.logaddexp.reduce(a, 1)\n return (a.transpose() - denom).transpose()\n\n\ndef logDot(a, b):\n max_a = np.amax(a)\n max_b = np.amax(b)\n C = np.dot(np.exp(a - max_a), np.exp(b - max_b))\n np.log(C, out=C)\n C += max_a + max_b\n return C\n",
"step-5": "import os\nfrom os.path import join\nimport json\nimport pandas as pd\nimport time\nimport numpy as np\nimport torch \n\ndef str2bool(v):\n # convert string to boolean type for argparser input\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef str_or_none(v):\n # convert string to boolean type for argparser input\n if v is None:\n return None\n if v.lower() == 'none':\n return None\n else:\n return v\n\n# helper functions for LDA arguments\ndef dic2name(dic):\n return '_'.join([\"{}-{}\".format(k, dic[k]) for k in sorted(dic)])\n\ndef name2dic(s):\n return {x.split('-')[0]:x.split('-')[1] for x in s.split('_')}\n\n\ndef get_valid_types(TYPENAME):\n\n with open(join(os.environ['BASEPATH'], 'configs', 'types.json'), 'r') as typefile: \n valid_types = json.load(typefile)[TYPENAME]\n return valid_types\n\n\ndef df_index_gen(f, table=False):\n # merge locator and dataset_id to genearte index table_id\n f.loc[:,'table_id'] = f.apply(lambda x: '+'.join([x['locator'], x['dataset_id']]), axis = 1)\n if not table:\n f.loc[:,'field_id'] = f.apply(lambda x: x['field_id'].split(\":\")[-1], axis = 1)\n f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')\n return f\n\n\n# load dataframe from pickle or create pickle file\ndef load_tmp_df(load_path, tmp_path, name, table=False):\n start = time.time()\n pkl_file = join(tmp_path, \"{}.pkl\".format(name))\n if os.path.exists(pkl_file):\n print(\"{} pickle file found, loading...\".format(pkl_file))\n df = pd.read_pickle(pkl_file)\n else:\n #process and save pkl\n print(\"{} pickle file not found, creating...\".format(pkl_file))\n df = pd.read_csv(join(load_path, \"{}.csv\".format(name)))\n\n df = df_index_gen(df, table)\n df.to_pickle(pkl_file)\n print(\"{} Load complete. Time {}\".format(name, time.time()-start))\n return df\n\ndef logSumExpTensor(vec):\n # vec -> 16, tag_size\n batch_size = vec.size()[0]\n vec = vec.view(batch_size, -1)\n max_score = torch.max(vec, 1)[0]\n max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])\n return max_score + \\\n torch.log(torch.sum(torch.exp(vec - max_score_broadcast), 1))\n\ndef logNormalizeTensor(a):\n\n denom = logSumExpTensor(a)\n if len(a.size())==2:\n denom = denom.view(-1, 1).expand(-1, a.size()[1])\n elif len(a.size())==3:\n denom = denom.view(a.size()[0], 1, 1).expand(-1, a.size()[1], a.size()[2])\n return (a-denom)\n\ndef logNormalize(a):\n\n denom = np.logaddexp.reduce(a, 1)\n return (a.transpose()- denom).transpose()\n\ndef logDot(a, b):\n\n # numeric stable way of calculating log (e^a, e^b)\n max_a = np.amax(a)\n max_b = np.amax(b)\n\n C = np.dot(np.exp(a - max_a), np.exp(b - max_b))\n np.log(C, out=C)\n # else:\n # np.log(C + 1e-300, out=C)\n\n C += max_a + max_b\n\n return C\n",
"step-ids": [
5,
8,
9,
12,
13
]
}
|
[
5,
8,
9,
12,
13
] |
import matplotlib.pyplot as plt
x_int = list(range(1, 5001))
y_int = [i**3 for i in x_int]
plt.scatter(x_int, y_int, c=y_int, cmap=plt.cm.Blues, s=40)
plt.show()
|
normal
|
{
"blob_id": "40e2b695d8aaaa82cb90694b85d12061b4e6eca8",
"index": 8034,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.scatter(x_int, y_int, c=y_int, cmap=plt.cm.Blues, s=40)\nplt.show()\n",
"step-3": "<mask token>\nx_int = list(range(1, 5001))\ny_int = [(i ** 3) for i in x_int]\nplt.scatter(x_int, y_int, c=y_int, cmap=plt.cm.Blues, s=40)\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\nx_int = list(range(1, 5001))\ny_int = [(i ** 3) for i in x_int]\nplt.scatter(x_int, y_int, c=y_int, cmap=plt.cm.Blues, s=40)\nplt.show()\n",
"step-5": "import matplotlib.pyplot as plt\n\nx_int = list(range(1, 5001))\ny_int = [i**3 for i in x_int]\n\nplt.scatter(x_int, y_int, c=y_int, cmap=plt.cm.Blues, s=40)\nplt.show()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask import Flask
from raven.contrib.flask import Sentry
from flask.signals import got_request_exception
app = Flask(__name__)
sentry = Sentry(dsn=app.config['SENTRY_DSN'])
@got_request_exception.connect
def log_exception_to_sentry(app, exception=None, **kwargs):
"""
Logs an exception to sentry.
:param app: The current application
:param exception: The exception that occurred
"""
sentry.captureException(exception)
|
normal
|
{
"blob_id": "f739fb56eae1ada2409ef7d75958bad2018f5134",
"index": 2743,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@got_request_exception.connect\ndef log_exception_to_sentry(app, exception=None, **kwargs):\n \"\"\"\n Logs an exception to sentry.\n\n :param app: The current application\n :param exception: The exception that occurred\n \"\"\"\n sentry.captureException(exception)\n",
"step-3": "<mask token>\napp = Flask(__name__)\nsentry = Sentry(dsn=app.config['SENTRY_DSN'])\n\n\n@got_request_exception.connect\ndef log_exception_to_sentry(app, exception=None, **kwargs):\n \"\"\"\n Logs an exception to sentry.\n\n :param app: The current application\n :param exception: The exception that occurred\n \"\"\"\n sentry.captureException(exception)\n",
"step-4": "from flask import Flask\nfrom raven.contrib.flask import Sentry\nfrom flask.signals import got_request_exception\napp = Flask(__name__)\nsentry = Sentry(dsn=app.config['SENTRY_DSN'])\n\n\n@got_request_exception.connect\ndef log_exception_to_sentry(app, exception=None, **kwargs):\n \"\"\"\n Logs an exception to sentry.\n\n :param app: The current application\n :param exception: The exception that occurred\n \"\"\"\n sentry.captureException(exception)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from datetime import datetime, timezone, timedelta
import json
import urllib.request
from mysql_dbcon import Connection
from model import SlackChannel, SlackUser, SlackMessage
# TODO set timezone at config
jst = timezone(timedelta(hours=+9), 'JST')
def get_new_message_list(channel_id: int):
with Connection() as cn:
token, channel = cn.s.query(SlackChannel.token, SlackChannel.channel).filter(
SlackChannel.id == channel_id).one()
user_dict = {user.user: user.other_name for user in cn.s.query(SlackUser).all()}
with urllib.request.urlopen(
f'https://slack.com/api/channels.history?token={token}&channel={channel}') as res:
json_dict = json.load(res)
print(json_dict)
messages = sorted(json_dict['messages'], key=lambda x: x.get('ts', ''))
client_msg_id_list = [
id_ for id_, in cn.s.query(SlackMessage.client_msg_id).filter(
SlackMessage.client_msg_id.in_([message.get('client_msg_id') for message in messages])
).all()]
message_list = []
insert_msg_id_list = []
for message in messages:
if not (message.get('user') and message.get('text') and message.get('client_msg_id')):
continue
if message.get('client_msg_id') in client_msg_id_list:
continue
time_stamp = message.get('ts', '')
if time_stamp:
time_stamp = datetime.fromtimestamp(float(time_stamp), jst).strftime('%m/%d %H:%M:%S')
text = message['text']
for user, name in user_dict.items():
text = text.replace(user, name)
message_list.append(user_dict[message['user']] + ':[' + time_stamp + '] ' + text)
insert_msg_id_list.append({'client_msg_id': message['client_msg_id']})
cn.s.bulk_insert_mappings(SlackMessage, insert_msg_id_list)
cn.s.commit()
return message_list
|
flexible
|
{
"blob_id": "2b141f12bec2006e496bf58a3fcb0167c95ab3b6",
"index": 2530,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_new_message_list(channel_id: int):\n with Connection() as cn:\n token, channel = cn.s.query(SlackChannel.token, SlackChannel.channel\n ).filter(SlackChannel.id == channel_id).one()\n user_dict = {user.user: user.other_name for user in cn.s.query(\n SlackUser).all()}\n with urllib.request.urlopen(\n f'https://slack.com/api/channels.history?token={token}&channel={channel}'\n ) as res:\n json_dict = json.load(res)\n print(json_dict)\n messages = sorted(json_dict['messages'], key=lambda x: x.get('ts', ''))\n client_msg_id_list = [id_ for id_, in cn.s.query(SlackMessage.\n client_msg_id).filter(SlackMessage.client_msg_id.in_([message.\n get('client_msg_id') for message in messages])).all()]\n message_list = []\n insert_msg_id_list = []\n for message in messages:\n if not (message.get('user') and message.get('text') and message\n .get('client_msg_id')):\n continue\n if message.get('client_msg_id') in client_msg_id_list:\n continue\n time_stamp = message.get('ts', '')\n if time_stamp:\n time_stamp = datetime.fromtimestamp(float(time_stamp), jst\n ).strftime('%m/%d %H:%M:%S')\n text = message['text']\n for user, name in user_dict.items():\n text = text.replace(user, name)\n message_list.append(user_dict[message['user']] + ':[' +\n time_stamp + '] ' + text)\n insert_msg_id_list.append({'client_msg_id': message[\n 'client_msg_id']})\n cn.s.bulk_insert_mappings(SlackMessage, insert_msg_id_list)\n cn.s.commit()\n return message_list\n",
"step-3": "<mask token>\njst = timezone(timedelta(hours=+9), 'JST')\n\n\ndef get_new_message_list(channel_id: int):\n with Connection() as cn:\n token, channel = cn.s.query(SlackChannel.token, SlackChannel.channel\n ).filter(SlackChannel.id == channel_id).one()\n user_dict = {user.user: user.other_name for user in cn.s.query(\n SlackUser).all()}\n with urllib.request.urlopen(\n f'https://slack.com/api/channels.history?token={token}&channel={channel}'\n ) as res:\n json_dict = json.load(res)\n print(json_dict)\n messages = sorted(json_dict['messages'], key=lambda x: x.get('ts', ''))\n client_msg_id_list = [id_ for id_, in cn.s.query(SlackMessage.\n client_msg_id).filter(SlackMessage.client_msg_id.in_([message.\n get('client_msg_id') for message in messages])).all()]\n message_list = []\n insert_msg_id_list = []\n for message in messages:\n if not (message.get('user') and message.get('text') and message\n .get('client_msg_id')):\n continue\n if message.get('client_msg_id') in client_msg_id_list:\n continue\n time_stamp = message.get('ts', '')\n if time_stamp:\n time_stamp = datetime.fromtimestamp(float(time_stamp), jst\n ).strftime('%m/%d %H:%M:%S')\n text = message['text']\n for user, name in user_dict.items():\n text = text.replace(user, name)\n message_list.append(user_dict[message['user']] + ':[' +\n time_stamp + '] ' + text)\n insert_msg_id_list.append({'client_msg_id': message[\n 'client_msg_id']})\n cn.s.bulk_insert_mappings(SlackMessage, insert_msg_id_list)\n cn.s.commit()\n return message_list\n",
"step-4": "from datetime import datetime, timezone, timedelta\nimport json\nimport urllib.request\nfrom mysql_dbcon import Connection\nfrom model import SlackChannel, SlackUser, SlackMessage\njst = timezone(timedelta(hours=+9), 'JST')\n\n\ndef get_new_message_list(channel_id: int):\n with Connection() as cn:\n token, channel = cn.s.query(SlackChannel.token, SlackChannel.channel\n ).filter(SlackChannel.id == channel_id).one()\n user_dict = {user.user: user.other_name for user in cn.s.query(\n SlackUser).all()}\n with urllib.request.urlopen(\n f'https://slack.com/api/channels.history?token={token}&channel={channel}'\n ) as res:\n json_dict = json.load(res)\n print(json_dict)\n messages = sorted(json_dict['messages'], key=lambda x: x.get('ts', ''))\n client_msg_id_list = [id_ for id_, in cn.s.query(SlackMessage.\n client_msg_id).filter(SlackMessage.client_msg_id.in_([message.\n get('client_msg_id') for message in messages])).all()]\n message_list = []\n insert_msg_id_list = []\n for message in messages:\n if not (message.get('user') and message.get('text') and message\n .get('client_msg_id')):\n continue\n if message.get('client_msg_id') in client_msg_id_list:\n continue\n time_stamp = message.get('ts', '')\n if time_stamp:\n time_stamp = datetime.fromtimestamp(float(time_stamp), jst\n ).strftime('%m/%d %H:%M:%S')\n text = message['text']\n for user, name in user_dict.items():\n text = text.replace(user, name)\n message_list.append(user_dict[message['user']] + ':[' +\n time_stamp + '] ' + text)\n insert_msg_id_list.append({'client_msg_id': message[\n 'client_msg_id']})\n cn.s.bulk_insert_mappings(SlackMessage, insert_msg_id_list)\n cn.s.commit()\n return message_list\n",
"step-5": "from datetime import datetime, timezone, timedelta\nimport json\nimport urllib.request\n\nfrom mysql_dbcon import Connection\nfrom model import SlackChannel, SlackUser, SlackMessage\n\n\n# TODO set timezone at config\njst = timezone(timedelta(hours=+9), 'JST')\n\n\ndef get_new_message_list(channel_id: int):\n with Connection() as cn:\n token, channel = cn.s.query(SlackChannel.token, SlackChannel.channel).filter(\n SlackChannel.id == channel_id).one()\n user_dict = {user.user: user.other_name for user in cn.s.query(SlackUser).all()}\n with urllib.request.urlopen(\n f'https://slack.com/api/channels.history?token={token}&channel={channel}') as res:\n json_dict = json.load(res)\n print(json_dict)\n messages = sorted(json_dict['messages'], key=lambda x: x.get('ts', ''))\n client_msg_id_list = [\n id_ for id_, in cn.s.query(SlackMessage.client_msg_id).filter(\n SlackMessage.client_msg_id.in_([message.get('client_msg_id') for message in messages])\n ).all()]\n message_list = []\n insert_msg_id_list = []\n for message in messages:\n if not (message.get('user') and message.get('text') and message.get('client_msg_id')):\n continue\n if message.get('client_msg_id') in client_msg_id_list:\n continue\n time_stamp = message.get('ts', '')\n if time_stamp:\n time_stamp = datetime.fromtimestamp(float(time_stamp), jst).strftime('%m/%d %H:%M:%S')\n text = message['text']\n for user, name in user_dict.items():\n text = text.replace(user, name)\n message_list.append(user_dict[message['user']] + ':[' + time_stamp + '] ' + text)\n insert_msg_id_list.append({'client_msg_id': message['client_msg_id']})\n cn.s.bulk_insert_mappings(SlackMessage, insert_msg_id_list)\n cn.s.commit()\n\n return message_list\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class SlackEvent:
@property
def client_msg_id(self):
pass
@property
def type(self):
pass
@property
def subtype(self):
pass
@property
def text(self):
pass
@property
def time_stamp(self):
pass
@property
def channel(self):
pass
@property
def channel_id(self):
pass
@property
def event_time_stamp(self):
pass
@property
def channel_type(self):
pass
@property
def thread_time_stamp(self):
pass
@property
def user(self):
pass
@property
def user_id(self):
pass
@property
def bot_id(self):
pass
@property
def actions(self):
pass
@property
def item(self):
pass
@property
def item_channel(self):
pass
@property
def files(self):
pass
@property
def message(self):
pass
|
normal
|
{
"blob_id": "4a4745f202275e45fd78c12431e355fd59ac964a",
"index": 6722,
"step-1": "class SlackEvent:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def time_stamp(self):\n pass\n\n @property\n def channel(self):\n pass\n <mask token>\n\n @property\n def event_time_stamp(self):\n pass\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def user_id(self):\n pass\n\n @property\n def bot_id(self):\n pass\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def files(self):\n pass\n\n @property\n def message(self):\n pass\n",
"step-2": "class SlackEvent:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def time_stamp(self):\n pass\n\n @property\n def channel(self):\n pass\n <mask token>\n\n @property\n def event_time_stamp(self):\n pass\n\n @property\n def channel_type(self):\n pass\n <mask token>\n <mask token>\n\n @property\n def user_id(self):\n pass\n\n @property\n def bot_id(self):\n pass\n <mask token>\n <mask token>\n\n @property\n def item_channel(self):\n pass\n\n @property\n def files(self):\n pass\n\n @property\n def message(self):\n pass\n",
"step-3": "class SlackEvent:\n\n @property\n def client_msg_id(self):\n pass\n <mask token>\n\n @property\n def subtype(self):\n pass\n\n @property\n def text(self):\n pass\n\n @property\n def time_stamp(self):\n pass\n\n @property\n def channel(self):\n pass\n\n @property\n def channel_id(self):\n pass\n\n @property\n def event_time_stamp(self):\n pass\n\n @property\n def channel_type(self):\n pass\n <mask token>\n\n @property\n def user(self):\n pass\n\n @property\n def user_id(self):\n pass\n\n @property\n def bot_id(self):\n pass\n <mask token>\n <mask token>\n\n @property\n def item_channel(self):\n pass\n\n @property\n def files(self):\n pass\n\n @property\n def message(self):\n pass\n",
"step-4": "class SlackEvent:\n\n @property\n def client_msg_id(self):\n pass\n\n @property\n def type(self):\n pass\n\n @property\n def subtype(self):\n pass\n\n @property\n def text(self):\n pass\n\n @property\n def time_stamp(self):\n pass\n\n @property\n def channel(self):\n pass\n\n @property\n def channel_id(self):\n pass\n\n @property\n def event_time_stamp(self):\n pass\n\n @property\n def channel_type(self):\n pass\n <mask token>\n\n @property\n def user(self):\n pass\n\n @property\n def user_id(self):\n pass\n\n @property\n def bot_id(self):\n pass\n\n @property\n def actions(self):\n pass\n <mask token>\n\n @property\n def item_channel(self):\n pass\n\n @property\n def files(self):\n pass\n\n @property\n def message(self):\n pass\n",
"step-5": "class SlackEvent:\r\n @property\r\n def client_msg_id(self):\r\n pass\r\n\r\n @property\r\n def type(self):\r\n pass\r\n\r\n @property\r\n def subtype(self):\r\n pass\r\n\r\n @property\r\n def text(self):\r\n pass\r\n\r\n @property\r\n def time_stamp(self):\r\n pass\r\n\r\n @property\r\n def channel(self):\r\n pass\r\n\r\n @property\r\n def channel_id(self):\r\n pass\r\n\r\n @property\r\n def event_time_stamp(self):\r\n pass\r\n\r\n @property\r\n def channel_type(self):\r\n pass\r\n\r\n @property\r\n def thread_time_stamp(self):\r\n pass\r\n\r\n @property\r\n def user(self):\r\n pass\r\n\r\n @property\r\n def user_id(self):\r\n pass\r\n\r\n @property\r\n def bot_id(self):\r\n pass\r\n\r\n @property\r\n def actions(self):\r\n pass\r\n\r\n @property\r\n def item(self):\r\n pass\r\n\r\n @property\r\n def item_channel(self):\r\n pass\r\n\r\n @property\r\n def files(self):\r\n pass\r\n\r\n @property\r\n def message(self):\r\n pass\r\n",
"step-ids": [
8,
10,
15,
17,
20
]
}
|
[
8,
10,
15,
17,
20
] |
import src.engine.functions.root_analyzer.main as main
from src.engine.functions.function import Function
class GetRootData(Function):
def __init__(self, data_display):
self.data_display = data_display
def call(self, args):
image_folder_path = args[0]
output_path = args[1]
self.data_display.clear()
data = main.generate_data(image_folder_path, self.data_display.data_tracker)
error_message = self.data_display.display_data(data)
return ""
|
normal
|
{
"blob_id": "e8ea307352805bf0b5129e2ad7f7b68c44e78fc9",
"index": 9118,
"step-1": "<mask token>\n\n\nclass GetRootData(Function):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass GetRootData(Function):\n\n def __init__(self, data_display):\n self.data_display = data_display\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass GetRootData(Function):\n\n def __init__(self, data_display):\n self.data_display = data_display\n\n def call(self, args):\n image_folder_path = args[0]\n output_path = args[1]\n self.data_display.clear()\n data = main.generate_data(image_folder_path, self.data_display.\n data_tracker)\n error_message = self.data_display.display_data(data)\n return ''\n",
"step-4": "import src.engine.functions.root_analyzer.main as main\nfrom src.engine.functions.function import Function\n\n\nclass GetRootData(Function):\n\n def __init__(self, data_display):\n self.data_display = data_display\n\n def call(self, args):\n image_folder_path = args[0]\n output_path = args[1]\n self.data_display.clear()\n data = main.generate_data(image_folder_path, self.data_display.\n data_tracker)\n error_message = self.data_display.display_data(data)\n return ''\n",
"step-5": "import src.engine.functions.root_analyzer.main as main\nfrom src.engine.functions.function import Function\n\nclass GetRootData(Function):\n\n def __init__(self, data_display):\n self.data_display = data_display\n\n def call(self, args):\n image_folder_path = args[0]\n output_path = args[1]\n self.data_display.clear()\n data = main.generate_data(image_folder_path, self.data_display.data_tracker)\n error_message = self.data_display.display_data(data)\n return \"\"\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from typing import Tuple
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:
_, lca = self.get_lca(root, 0)
return lca
def get_lca(self, node: TreeNode, depth: int) ->Tuple[int, TreeNode]:
if not node:
return depth, node
left_depth, left_lca = self.get_lca(node.left, depth + 1)
right_depth, right_lca = self.get_lca(node.right, depth + 1)
if left_depth == right_depth:
return left_depth, node
if left_depth > right_depth:
return left_depth, left_lca
return right_depth, right_lca
|
normal
|
{
"blob_id": "0a528fb7fe4a318af8bd3111e8d67f6af6bd7416",
"index": 304,
"step-1": "<mask token>\n\n\nclass Solution:\n\n def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:\n _, lca = self.get_lca(root, 0)\n return lca\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TreeNode:\n <mask token>\n\n\nclass Solution:\n\n def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:\n _, lca = self.get_lca(root, 0)\n return lca\n\n def get_lca(self, node: TreeNode, depth: int) ->Tuple[int, TreeNode]:\n if not node:\n return depth, node\n left_depth, left_lca = self.get_lca(node.left, depth + 1)\n right_depth, right_lca = self.get_lca(node.right, depth + 1)\n if left_depth == right_depth:\n return left_depth, node\n if left_depth > right_depth:\n return left_depth, left_lca\n return right_depth, right_lca\n",
"step-3": "<mask token>\n\n\nclass TreeNode:\n\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n\n def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:\n _, lca = self.get_lca(root, 0)\n return lca\n\n def get_lca(self, node: TreeNode, depth: int) ->Tuple[int, TreeNode]:\n if not node:\n return depth, node\n left_depth, left_lca = self.get_lca(node.left, depth + 1)\n right_depth, right_lca = self.get_lca(node.right, depth + 1)\n if left_depth == right_depth:\n return left_depth, node\n if left_depth > right_depth:\n return left_depth, left_lca\n return right_depth, right_lca\n",
"step-4": "from typing import Tuple\n\n\nclass TreeNode:\n\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n\n def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:\n _, lca = self.get_lca(root, 0)\n return lca\n\n def get_lca(self, node: TreeNode, depth: int) ->Tuple[int, TreeNode]:\n if not node:\n return depth, node\n left_depth, left_lca = self.get_lca(node.left, depth + 1)\n right_depth, right_lca = self.get_lca(node.right, depth + 1)\n if left_depth == right_depth:\n return left_depth, node\n if left_depth > right_depth:\n return left_depth, left_lca\n return right_depth, right_lca\n",
"step-5": null,
"step-ids": [
2,
4,
5,
6
]
}
|
[
2,
4,
5,
6
] |
from sys import stdin
def main():
lines = stdin
n, k = map(int, lines.next().split())
if k > n:
print -1
else:
arr = map(int, lines.next().split())
arr.sort(reverse = True)
print "%d %d" % (arr[k - 1], arr[k - 1])
main()
|
normal
|
{
"blob_id": "fc04623db0d07f3a0a55ad49a74643a74e5203a6",
"index": 4938,
"step-1": "from sys import stdin\n\ndef main():\n lines = stdin\n n, k = map(int, lines.next().split())\n\n if k > n:\n print -1\n else:\n arr = map(int, lines.next().split())\n arr.sort(reverse = True)\n print \"%d %d\" % (arr[k - 1], arr[k - 1])\n \n\nmain()\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
sentence = input()
check_list = ["U", "C", "P", "C"]
check = True
for i in range(len(check_list)):
if check_list[i] in sentence:
check = True
idx = sentence.find(check_list[i])
sentence = sentence[idx+1:]
else:
check = False
break
if check == True:
print("I love UCPC")
else:
print("I hate UCPC")
|
normal
|
{
"blob_id": "4545d9756d1f396ead0b0c75d319fb6a718375cd",
"index": 2108,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(len(check_list)):\n if check_list[i] in sentence:\n check = True\n idx = sentence.find(check_list[i])\n sentence = sentence[idx + 1:]\n else:\n check = False\n break\nif check == True:\n print('I love UCPC')\nelse:\n print('I hate UCPC')\n",
"step-3": "sentence = input()\ncheck_list = ['U', 'C', 'P', 'C']\ncheck = True\nfor i in range(len(check_list)):\n if check_list[i] in sentence:\n check = True\n idx = sentence.find(check_list[i])\n sentence = sentence[idx + 1:]\n else:\n check = False\n break\nif check == True:\n print('I love UCPC')\nelse:\n print('I hate UCPC')\n",
"step-4": "sentence = input()\ncheck_list = [\"U\", \"C\", \"P\", \"C\"]\ncheck = True\n\nfor i in range(len(check_list)):\n if check_list[i] in sentence:\n check = True\n idx = sentence.find(check_list[i])\n sentence = sentence[idx+1:]\n else:\n check = False\n break\n\nif check == True:\n print(\"I love UCPC\")\nelse:\n print(\"I hate UCPC\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
# Copyright (c) 2018 Juniper Networks, Inc.
# All rights reserved.
# Use is subject to license terms.
#
# Author: cklewar
import os
import threading
import time
from jnpr.junos import Device
from jnpr.junos import exception
from jnpr.junos.utils.config import Config
from jnpr.junos.utils.sw import SW
from paramiko import BadHostKeyException, AuthenticationException
from scp import SCPClient
import lib.constants as c
from lib.logmsg import LogCommon
from lib.logmsg import LogSoftwareTask as logmsg
from lib.tasks.task import Task
from lib.tasks.tasktools import Software
from lib.tools import Tools
class SoftwareTask(Task):
CHECK_SCHEMA = True
TASK_TYPE = c.TASK_TYPE_PROVISION
TASK_VERSION = 1.0
sample_devices = dict()
sample_devices_lock = threading.Lock()
def __init__(self, sample_device=None, shared=None):
super(SoftwareTask, self).__init__(sample_device=sample_device, shared=shared)
self.logger.debug(Tools.create_log_msg(self.task_name, self.sample_device.deviceSerial,
LogCommon.IS_SUBCLASS.format(self.task_name,
issubclass(SoftwareTask, Task))))
def pre_run_task(self):
pass
def run_task(self):
"""
Provision device images
:param sample_device: A device object for which the image provisioning should be done
:return:
"""
target_version = getattr(self.grp_cfg.TASKS.Provision.Software.TargetVersion, self.sample_device.deviceModel,
None)
if self.sample_device.deviceStatus == c.DEVICE_STATUS_REBOOTED:
# Device has been rebooted do not update again
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_INSTALLED_VERS.format(self.sample_device.softwareVersion))
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_TARGET_VERS.format(target_version))
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_NO_UPDATE_NEEDED_SAME)
self.sample_device.deviceIsRebooted = False
self.update_task_state(new_task_state=c.TASK_STATE_DONE, task_state_message=c.TASK_STATE_MSG_DONE)
else:
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_START_UPDATE.format(self.sample_device.deviceSerial))
SoftwareTask.sample_devices[self.sample_device.deviceSerial] = self.sample_device
if target_version is not None:
feedback = Software.compare_device_vers_with_target_vers(self.sample_device.softwareVersion,
target_version)
if feedback == 0:
self.update_task_state(new_task_state=c.TASK_STATE_DONE,
task_state_message=logmsg.SW_DONE_SAME_VERS)
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_INSTALLED_VERS.format(
self.sample_device.softwareVersion))
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_TARGET_VERS.format(target_version))
Tools.emit_log(task_name=self.task_name,
task_state={'taskState': self.task_state, 'taskStateMsg': logmsg.SW_DONE_SAME_VERS},
sample_device=self.sample_device, grp_cfg=self.grp_cfg, shared=self.shared,
scope=c.LOGGER_SCOPE_ALL, level=c.LOGGER_LEVEL_INFO,
message=logmsg.SW_NO_UPDATE_NEEDED_SAME)
elif feedback == 1:
self.update_task_state(new_task_state=c.TASK_STATE_DONE,
task_state_message=logmsg.SW_DONE_DEV_NEWER_VERS)
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_INSTALLED_VERS.format(
self.sample_device.softwareVersion))
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_TARGET_VERS.format(target_version))
Tools.emit_log(task_name=self.task_name,
task_state={'taskState': self.task_state,
'taskStateMsg': logmsg.SW_DONE_DEV_NEWER_VERS},
sample_device=self.sample_device, grp_cfg=self.grp_cfg, shared=self.shared,
scope=c.LOGGER_SCOPE_ALL, level=c.LOGGER_LEVEL_INFO,
message=logmsg.SW_NO_UPDATE_NEEDED_NEWER)
else:
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_INSTALLED_VERS.format(
self.sample_device.softwareVersion))
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_TARGET_VERS.format(target_version))
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_UPDATE_NEEDED.format(
self.sample_device.softwareVersion, target_version))
filename = Software.get_software_image_name(self.sample_device, target_version,
grp_cfg=self.grp_cfg)
if filename:
full_path = self.grp_cfg.TASKS.Provision.Software.ImageDir + filename
if self.sample_device.deviceConnection.connected:
self.sample_device = self.install_device_software(full_path, filename, target_version)
if self.sample_device is not None:
if self.task_state != c.TASK_STATE_FAILED and self.task_state != c.TASK_STATE_REBOOTING:
if self.sample_device.deviceConnection is not None:
self.sample_device.deviceConnection.facts_refresh(keys='version')
self.sample_device.softwareVersion = self.sample_device.deviceConnection.facts[
"version"]
self.update_task_state(new_task_state=c.TASK_STATE_DONE,
task_state_message=c.TASK_STATE_MSG_DONE)
Tools.emit_log(task_name=self.task_name,
task_state={'taskState': self.task_state,
'taskStateMsg': c.TASK_STATE_MSG_DONE},
sample_device=self.sample_device, grp_cfg=self.grp_cfg,
shared=self.shared,
scope=c.LOGGER_SCOPE_ALL, level=c.LOGGER_LEVEL_INFO,
message=logmsg.SW_NO_UPDATE_NEEDED_SAME)
else:
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_CONN_NOK.format(self.sample_device.deviceIP))
self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
task_state_message=c.TASK_STATE_MSG_FAILED)
return
else:
return
else:
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_CONN_NOK.format(self.sample_device.deviceIP))
self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
task_state_message=logmsg.SW_CONN_NOK.format(
self.sample_device.deviceIP))
else:
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_CONN_NOK.format(self.sample_device.deviceIP))
self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
task_state_message=logmsg.SW_CONN_NOK.format(
self.sample_device.deviceIP))
else:
self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
task_state_message=logmsg.SW_IMG_NOK.format(target_version))
else:
self.logger.info(Tools.create_log_msg(self.task_name, self.sample_device.deviceSerial,
logmsg.SW_NO_TARGET_VERS_FOUND.format(
self.sample_device.deviceModel)))
self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
task_state_message=logmsg.SW_IMG_VALUE_NOK.format(
self.sample_device.deviceGroup))
def install_device_software(self, path, image, target_version):
"""
Call PyEz to install new JUNOS image to device
:param sample_device:
:param path:
:param image:
:param target_version
:return:
"""
package = os.path.join(os.getcwd(), path)
if c.SERVICEPLUGIN_OSSH in self.sample_device.deviceServicePlugin:
try:
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_CLEANUP_STORAGE)
self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
task_state_message=logmsg.SW_CLEANUP_STORAGE)
self.sample_device.deviceConnection.rpc.request_system_storage_cleanup()
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_COPY_IMG.format(image))
self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
task_state_message=logmsg.SW_COPY_IMG.format(image))
# progress = SoftwareTask.copy_progress
with SCPClient(transport=self.sample_device.deviceConnection._conn._session.transport) as scp:
scp.put(package, remote_path=self.grp_cfg.TASKS.Provision.Software.RemoteDir)
except (BadHostKeyException, AuthenticationException) as e:
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_COPY_IMG_NOK.format(e.message))
self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
task_state_message=logmsg.SW_COPY_IMG_NOK.format(e.message))
return self.sample_device
try:
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_INSTALL_VERS.format(target_version))
self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
task_state_message=logmsg.SW_INSTALL_VERS.format(target_version))
result = self.sample_device.deviceConnection.sw.pkgadd(
self.grp_cfg.TASKS.Provision.Software.RemoteDir + image,
dev_timeout=self.grp_cfg.TASKS.Provision.Software.PkgAddDevTimeout)
except Exception as err:
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_INSTALL_NOK.format(str(err)))
self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
task_state_message=logmsg.SW_INSTALL_NOK.format(str(err)))
return self.sample_device
if result is True:
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_INSTALL_OK.format(self.sample_device.deviceIP))
self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
task_state_message=logmsg.SW_INSTALL_OK.format(self.sample_device.deviceIP))
else:
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_INSTALL_NOK.format(str(result)))
self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
task_state_message=logmsg.SW_INSTALL_NOK.format(str(result)))
time.sleep(3)
return self.sample_device
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_REBOOT.format(self.sample_device.deviceIP))
self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
task_state_message=logmsg.SW_REBOOT.format(self.sample_device.deviceIP))
try:
rsp = self.sample_device.deviceConnection.sw.reboot()
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_REBOOT_DEV_RESP.format(rsp.replace('\n', " ")))
self.sample_device.deviceConnection.close()
self.sample_device.deviceIsRebooted = True
self.update_task_state(new_task_state=c.TASK_STATE_REBOOTING,
task_state_message='Rebooting...')
c.oss_seen_devices_lck.acquire()
try:
if self.sample_device.deviceIP in c.oss_seen_devices:
c.oss_seen_devices.pop(self.sample_device.deviceIP, None)
finally:
c.oss_seen_devices_lck.release()
return self.sample_device
except exception.ConnectClosedError:
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_CONN_LOOSE_REBOOT)
self.update_task_state(new_task_state=c.TASK_STATE_REBOOTING,
task_state_message=logmsg.SW_CONN_LOOSE_REBOOT)
return self.sample_device
else:
try:
result = self.sample_device.deviceConnection.sw.install(package=package,
remote_path=self.grp_cfg.TASKS.Provision.Software.RemoteDir,
cleanfs=True, no_copy=False,
progress=SoftwareTask.install_progress)
except Exception as err:
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_INSTALL_NOK.format(str(err)))
self.update_task_state(new_task_state=c.TASK_STATE_FAILED, task_state_message=str(err))
return self.sample_device
if result is True:
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_INSTALL_OK.format(self.sample_device.deviceIP))
self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
task_state_message=logmsg.SW_INSTALL_OK.format(self.sample_device.deviceIP))
else:
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_INSTALL_NOK.format(str(result)))
self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
task_state_message=logmsg.SW_INSTALL_NOK.format(str(result)))
time.sleep(3)
return self.sample_device
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_REBOOT.format(self.sample_device.deviceIP))
self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
task_state_message=logmsg.SW_REBOOT.format(self.sample_device.deviceIP))
try:
rsp = self.sample_device.deviceConnection.sw.reboot()
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_REBOOT_DEV_RESP.format(rsp.replace('\n', " ")))
# self.sample_device.deviceConnection.close()
except exception.ConnectClosedError:
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_CONN_LOOSE_REBOOT)
self.update_task_state(new_task_state=c.TASK_STATE_REBOOTING,
task_state_message=logmsg.SW_CONN_LOOSE_REBOOT)
finally:
alive = self.probe_device_not_alive(self.sample_device,
self.grp_cfg.TASKS.Provision.Software.RetryProbeCounter)
if not alive:
self.sample_device.deviceIsRebooted = True
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_PROBE_WAKEUP.format(self.sample_device.deviceIP))
status, self.sample_device = Tools.create_dev_conn(self.sample_device, connect=False)
if status:
alive = self.probe_device_alive(self.sample_device,
self.grp_cfg.TASKS.Provision.Software.RebootProbeTimeout)
if alive:
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_PROBE_WAKUP_OK.format(self.sample_device.deviceIP))
self.sample_device.deviceIsRebooted = False
self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
task_state_message=logmsg.SW_PROBE_WAKUP_OK.format(
self.sample_device.deviceIP))
status, self.sample_device = Tools.create_dev_conn(self.sample_device)
if status:
self.sample_device.deviceConnection.bind(cu=Config, sw=SW)
# Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
# message=logmsg.SW_CONN_OK.format(self.sample_device.deviceIP))
self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,
task_state_message=logmsg.SW_CONN_OK.format(
self.sample_device.deviceIP))
return self.sample_device
else:
return self.sample_device
else:
self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
task_state_message=c.TASK_STATE_MSG_FAILED)
self.sample_device.deviceConnection = None
return self.sample_device
else:
Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,
message=logmsg.SW_PROBE_DEV_NOK.format(self.sample_device.deviceIP,
self.grp_cfg.TASKS.Provision.Software.RebootProbeCounter))
self.update_task_state(new_task_state=c.TASK_STATE_FAILED,
task_state_message=logmsg.SW_PROBE_DEV_NOK.format(
self.sample_device.deviceIP,
self.grp_cfg.TASKS.Provision.Software.RebootProbeCounter))
def probe_device_alive(self, device, timeout):
"""
:param device:
:param timeout:
:return:
"""
alive = device.deviceConnection.probe(timeout=5)
probe_attemps = self.grp_cfg.TASKS.Provision.Software.RebootProbeCounter
probe_cntr = 0
while not alive:
if probe_cntr <= probe_attemps:
alive = device.deviceConnection.probe(timeout)
probe_cntr += 1
Tools.emit_log(task_name=self.task_name, sample_device=device,
message=logmsg.SW_PROBE_DEV.format(timeout))
self.update_task_state(new_task_state=c.TASK_STATE_REBOOTING,
task_state_message=logmsg.SW_PROBE_WAIT_REBOOT.format(str(probe_cntr)))
else:
self.update_task_state(new_task_state=c.TASK_STATE_FAILED, task_state_message=c.TASK_STATE_FAILED)
break
return alive
def probe_device_not_alive(self, device, timeout):
"""
:param device:
:param timeout:
:return:
"""
alive = device.deviceConnection.probe(timeout=5)
probe_attemps = self.grp_cfg.TASKS.Provision.Software.RebootProbeCounter
probe_cntr = 0
while alive:
if probe_cntr <= probe_attemps:
alive = device.deviceConnection.probe(1)
probe_cntr += 1
Tools.emit_log(task_name=self.task_name, sample_device=device,
message=logmsg.SW_PROBE_DEV.format(timeout))
self.update_task_state(new_task_state=c.TASK_STATE_REBOOTING,
task_state_message=logmsg.SW_PROBE_WAIT_REBOOT.format(str(probe_cntr)))
time.sleep(timeout)
else:
self.update_task_state(new_task_state=c.TASK_STATE_FAILED, task_state_message=c.TASK_STATE_MSG_FAILED)
break
return alive
@staticmethod
def install_progress(dev, report):
c.logger.info(
'[{0:{1}}][{2:{3}}][{4}]'.format('SOFTWARE', c.FIRST_PAD, dev.facts["serialnumber"], c.SECOND_PAD, report))
with SoftwareTask.sample_devices_lock:
SoftwareTask.sample_devices[dev.facts['serialnumber']].deviceTasks.taskState['Software'] = {
'taskState': c.TASK_STATE_PROGRESS, 'taskStateMsg': report}
@staticmethod
def copy_progress(filename, size, sent):
# print filename + " " + str(int(size)) + " " + str(int(sent))
# print (sent / (1024 * 1024)) * 100.0 / (size / (1024 * 1024))
c.logger.info('PROVSW: Copy file <%s> progress <%s>', filename,
(sent / (1024 * 1024)) * 100.0 / (size / (1024 * 1024)))
#with SoftwareTask.sample_devices_lock:
# SoftwareTask.sample_devices[dev.facts['serialnumber']].deviceTasks.taskState['Software'] = (sent / (1024 * 1024)) * 100.0 / (size / (1024 * 1024)))
def post_run_task(self):
with SoftwareTask.sample_devices_lock:
if self.sample_device.deviceSerial in SoftwareTask.sample_devices:
del SoftwareTask.sample_devices[self.sample_device.deviceSerial]
|
normal
|
{
"blob_id": "45cdf33f509e7913f31d2c1d6bfada3a84478736",
"index": 2904,
"step-1": "<mask token>\n\n\nclass SoftwareTask(Task):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, sample_device=None, shared=None):\n super(SoftwareTask, self).__init__(sample_device=sample_device,\n shared=shared)\n self.logger.debug(Tools.create_log_msg(self.task_name, self.\n sample_device.deviceSerial, LogCommon.IS_SUBCLASS.format(self.\n task_name, issubclass(SoftwareTask, Task))))\n\n def pre_run_task(self):\n pass\n\n def run_task(self):\n \"\"\"\n Provision device images\n\n :param sample_device: A device object for which the image provisioning should be done\n :return:\n \"\"\"\n target_version = getattr(self.grp_cfg.TASKS.Provision.Software.\n TargetVersion, self.sample_device.deviceModel, None)\n if self.sample_device.deviceStatus == c.DEVICE_STATUS_REBOOTED:\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_INSTALLED_VERS.format(self\n .sample_device.softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_TARGET_VERS.format(\n target_version))\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_NO_UPDATE_NEEDED_SAME)\n self.sample_device.deviceIsRebooted = False\n self.update_task_state(new_task_state=c.TASK_STATE_DONE,\n task_state_message=c.TASK_STATE_MSG_DONE)\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_START_UPDATE.format(self.\n sample_device.deviceSerial))\n SoftwareTask.sample_devices[self.sample_device.deviceSerial\n ] = self.sample_device\n if target_version is not None:\n feedback = Software.compare_device_vers_with_target_vers(self\n .sample_device.softwareVersion, target_version)\n if feedback == 0:\n self.update_task_state(new_task_state=c.TASK_STATE_DONE,\n task_state_message=logmsg.SW_DONE_SAME_VERS)\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.\n SW_INSTALLED_VERS.format(self.sample_device.\n softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_TARGET_VERS.\n format(target_version))\n Tools.emit_log(task_name=self.task_name, task_state={\n 'taskState': self.task_state, 'taskStateMsg':\n logmsg.SW_DONE_SAME_VERS}, sample_device=self.\n sample_device, grp_cfg=self.grp_cfg, shared=self.\n shared, scope=c.LOGGER_SCOPE_ALL, level=c.\n LOGGER_LEVEL_INFO, message=logmsg.\n SW_NO_UPDATE_NEEDED_SAME)\n elif feedback == 1:\n self.update_task_state(new_task_state=c.TASK_STATE_DONE,\n task_state_message=logmsg.SW_DONE_DEV_NEWER_VERS)\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.\n SW_INSTALLED_VERS.format(self.sample_device.\n softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_TARGET_VERS.\n format(target_version))\n Tools.emit_log(task_name=self.task_name, task_state={\n 'taskState': self.task_state, 'taskStateMsg':\n logmsg.SW_DONE_DEV_NEWER_VERS}, sample_device=self.\n sample_device, grp_cfg=self.grp_cfg, shared=self.\n shared, scope=c.LOGGER_SCOPE_ALL, level=c.\n LOGGER_LEVEL_INFO, message=logmsg.\n SW_NO_UPDATE_NEEDED_NEWER)\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.\n SW_INSTALLED_VERS.format(self.sample_device.\n softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, 
message=logmsg.SW_TARGET_VERS.\n format(target_version))\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_UPDATE_NEEDED\n .format(self.sample_device.softwareVersion,\n target_version))\n filename = Software.get_software_image_name(self.\n sample_device, target_version, grp_cfg=self.grp_cfg)\n if filename:\n full_path = (self.grp_cfg.TASKS.Provision.Software.\n ImageDir + filename)\n if self.sample_device.deviceConnection.connected:\n self.sample_device = self.install_device_software(\n full_path, filename, target_version)\n if self.sample_device is not None:\n if (self.task_state != c.TASK_STATE_FAILED and\n self.task_state != c.TASK_STATE_REBOOTING):\n if (self.sample_device.deviceConnection\n is not None):\n self.sample_device.deviceConnection.facts_refresh(\n keys='version')\n (self.sample_device.softwareVersion) = (\n self.sample_device.deviceConnection\n .facts['version'])\n self.update_task_state(new_task_state=c\n .TASK_STATE_DONE,\n task_state_message=c.\n TASK_STATE_MSG_DONE)\n Tools.emit_log(task_name=self.task_name,\n task_state={'taskState': self.\n task_state, 'taskStateMsg': c.\n TASK_STATE_MSG_DONE}, sample_device\n =self.sample_device, grp_cfg=self.\n grp_cfg, shared=self.shared, scope=\n c.LOGGER_SCOPE_ALL, level=c.\n LOGGER_LEVEL_INFO, message=logmsg.\n SW_NO_UPDATE_NEEDED_SAME)\n else:\n Tools.emit_log(task_name=self.task_name,\n sample_device=self.sample_device,\n message=logmsg.SW_CONN_NOK.format(\n self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c\n .TASK_STATE_FAILED,\n task_state_message=c.\n TASK_STATE_MSG_FAILED)\n return\n else:\n return\n else:\n Tools.emit_log(task_name=self.task_name,\n sample_device=self.sample_device,\n message=logmsg.SW_CONN_NOK.format(self.\n sample_device.deviceIP))\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=\n logmsg.SW_CONN_NOK.format(self.\n sample_device.deviceIP))\n else:\n Tools.emit_log(task_name=self.task_name,\n sample_device=self.sample_device, message=\n logmsg.SW_CONN_NOK.format(self.\n sample_device.deviceIP))\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=\n logmsg.SW_CONN_NOK.format(self.\n sample_device.deviceIP))\n else:\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=logmsg.\n SW_IMG_NOK.format(target_version))\n else:\n self.logger.info(Tools.create_log_msg(self.task_name, self.\n sample_device.deviceSerial, logmsg.\n SW_NO_TARGET_VERS_FOUND.format(self.sample_device.\n deviceModel)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_IMG_VALUE_NOK.format(self.\n sample_device.deviceGroup))\n\n def install_device_software(self, path, image, target_version):\n \"\"\"\n Call PyEz to install new JUNOS image to device\n :param sample_device:\n :param path:\n :param image:\n :param target_version\n :return:\n \"\"\"\n package = os.path.join(os.getcwd(), path)\n if c.SERVICEPLUGIN_OSSH in self.sample_device.deviceServicePlugin:\n try:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_CLEANUP_STORAGE)\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_CLEANUP_STORAGE)\n self.sample_device.deviceConnection.rpc.request_system_storage_cleanup(\n )\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_COPY_IMG.format(image))\n 
self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_COPY_IMG.format(image))\n with SCPClient(transport=self.sample_device.\n deviceConnection._conn._session.transport) as scp:\n scp.put(package, remote_path=self.grp_cfg.TASKS.\n Provision.Software.RemoteDir)\n except (BadHostKeyException, AuthenticationException) as e:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_COPY_IMG_NOK.format(e\n .message))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_COPY_IMG_NOK.format(e.message)\n )\n return self.sample_device\n try:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_VERS.format(\n target_version))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_INSTALL_VERS.format(\n target_version))\n result = self.sample_device.deviceConnection.sw.pkgadd(self\n .grp_cfg.TASKS.Provision.Software.RemoteDir + image,\n dev_timeout=self.grp_cfg.TASKS.Provision.Software.\n PkgAddDevTimeout)\n except Exception as err:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_NOK.format(\n str(err)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_INSTALL_NOK.format(str(err)))\n return self.sample_device\n if result is True:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_OK.format(\n self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_INSTALL_OK.format(self.\n sample_device.deviceIP))\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_NOK.format(\n str(result)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_INSTALL_NOK.format(str(\n result)))\n time.sleep(3)\n return self.sample_device\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_REBOOT.format(self.\n sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_REBOOT.format(self.\n sample_device.deviceIP))\n try:\n rsp = self.sample_device.deviceConnection.sw.reboot()\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_REBOOT_DEV_RESP.\n format(rsp.replace('\\n', ' ')))\n self.sample_device.deviceConnection.close()\n self.sample_device.deviceIsRebooted = True\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message='Rebooting...')\n c.oss_seen_devices_lck.acquire()\n try:\n if self.sample_device.deviceIP in c.oss_seen_devices:\n c.oss_seen_devices.pop(self.sample_device.deviceIP,\n None)\n finally:\n c.oss_seen_devices_lck.release()\n return self.sample_device\n except exception.ConnectClosedError:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_CONN_LOOSE_REBOOT)\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message=logmsg.\n SW_CONN_LOOSE_REBOOT)\n return self.sample_device\n else:\n try:\n result = self.sample_device.deviceConnection.sw.install(package\n =package, remote_path=self.grp_cfg.TASKS.Provision.\n Software.RemoteDir, cleanfs=True, no_copy=False,\n progress=SoftwareTask.install_progress)\n except Exception as err:\n 
Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_NOK.format(\n str(err)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=str(err))\n return self.sample_device\n if result is True:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_OK.format(\n self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_INSTALL_OK.format(self.\n sample_device.deviceIP))\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_NOK.format(\n str(result)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_INSTALL_NOK.format(str(\n result)))\n time.sleep(3)\n return self.sample_device\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_REBOOT.format(self.\n sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_REBOOT.format(self.\n sample_device.deviceIP))\n try:\n rsp = self.sample_device.deviceConnection.sw.reboot()\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_REBOOT_DEV_RESP.\n format(rsp.replace('\\n', ' ')))\n except exception.ConnectClosedError:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_CONN_LOOSE_REBOOT)\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message=logmsg.\n SW_CONN_LOOSE_REBOOT)\n finally:\n alive = self.probe_device_not_alive(self.sample_device,\n self.grp_cfg.TASKS.Provision.Software.RetryProbeCounter)\n if not alive:\n self.sample_device.deviceIsRebooted = True\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_PROBE_WAKEUP.\n format(self.sample_device.deviceIP))\n status, self.sample_device = Tools.create_dev_conn(self\n .sample_device, connect=False)\n if status:\n alive = self.probe_device_alive(self.sample_device,\n self.grp_cfg.TASKS.Provision.Software.\n RebootProbeTimeout)\n if alive:\n Tools.emit_log(task_name=self.task_name,\n sample_device=self.sample_device, message=\n logmsg.SW_PROBE_WAKUP_OK.format(self.\n sample_device.deviceIP))\n self.sample_device.deviceIsRebooted = False\n self.update_task_state(new_task_state=c.\n TASK_STATE_PROGRESS, task_state_message=\n logmsg.SW_PROBE_WAKUP_OK.format(self.\n sample_device.deviceIP))\n status, self.sample_device = Tools.create_dev_conn(\n self.sample_device)\n if status:\n self.sample_device.deviceConnection.bind(cu\n =Config, sw=SW)\n self.update_task_state(new_task_state=c.\n TASK_STATE_PROGRESS, task_state_message\n =logmsg.SW_CONN_OK.format(self.\n sample_device.deviceIP))\n return self.sample_device\n else:\n return self.sample_device\n else:\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=c.\n TASK_STATE_MSG_FAILED)\n self.sample_device.deviceConnection = None\n return self.sample_device\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_PROBE_DEV_NOK\n .format(self.sample_device.deviceIP, self.grp_cfg.\n TASKS.Provision.Software.RebootProbeCounter))\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=logmsg.\n SW_PROBE_DEV_NOK.format(self.sample_device.deviceIP,\n self.grp_cfg.TASKS.Provision.Software.\n 
RebootProbeCounter))\n <mask token>\n\n def probe_device_not_alive(self, device, timeout):\n \"\"\"\n\n :param device:\n :param timeout:\n :return:\n \"\"\"\n alive = device.deviceConnection.probe(timeout=5)\n probe_attemps = (self.grp_cfg.TASKS.Provision.Software.\n RebootProbeCounter)\n probe_cntr = 0\n while alive:\n if probe_cntr <= probe_attemps:\n alive = device.deviceConnection.probe(1)\n probe_cntr += 1\n Tools.emit_log(task_name=self.task_name, sample_device=\n device, message=logmsg.SW_PROBE_DEV.format(timeout))\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message=logmsg.\n SW_PROBE_WAIT_REBOOT.format(str(probe_cntr)))\n time.sleep(timeout)\n else:\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=c.TASK_STATE_MSG_FAILED)\n break\n return alive\n\n @staticmethod\n def install_progress(dev, report):\n c.logger.info('[{0:{1}}][{2:{3}}][{4}]'.format('SOFTWARE', c.\n FIRST_PAD, dev.facts['serialnumber'], c.SECOND_PAD, report))\n with SoftwareTask.sample_devices_lock:\n SoftwareTask.sample_devices[dev.facts['serialnumber']\n ].deviceTasks.taskState['Software'] = {'taskState': c.\n TASK_STATE_PROGRESS, 'taskStateMsg': report}\n\n @staticmethod\n def copy_progress(filename, size, sent):\n c.logger.info('PROVSW: Copy file <%s> progress <%s>', filename, \n sent / (1024 * 1024) * 100.0 / (size / (1024 * 1024)))\n\n def post_run_task(self):\n with SoftwareTask.sample_devices_lock:\n if self.sample_device.deviceSerial in SoftwareTask.sample_devices:\n del SoftwareTask.sample_devices[self.sample_device.deviceSerial\n ]\n",
"step-2": "<mask token>\n\n\nclass SoftwareTask(Task):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, sample_device=None, shared=None):\n super(SoftwareTask, self).__init__(sample_device=sample_device,\n shared=shared)\n self.logger.debug(Tools.create_log_msg(self.task_name, self.\n sample_device.deviceSerial, LogCommon.IS_SUBCLASS.format(self.\n task_name, issubclass(SoftwareTask, Task))))\n\n def pre_run_task(self):\n pass\n\n def run_task(self):\n \"\"\"\n Provision device images\n\n :param sample_device: A device object for which the image provisioning should be done\n :return:\n \"\"\"\n target_version = getattr(self.grp_cfg.TASKS.Provision.Software.\n TargetVersion, self.sample_device.deviceModel, None)\n if self.sample_device.deviceStatus == c.DEVICE_STATUS_REBOOTED:\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_INSTALLED_VERS.format(self\n .sample_device.softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_TARGET_VERS.format(\n target_version))\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_NO_UPDATE_NEEDED_SAME)\n self.sample_device.deviceIsRebooted = False\n self.update_task_state(new_task_state=c.TASK_STATE_DONE,\n task_state_message=c.TASK_STATE_MSG_DONE)\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_START_UPDATE.format(self.\n sample_device.deviceSerial))\n SoftwareTask.sample_devices[self.sample_device.deviceSerial\n ] = self.sample_device\n if target_version is not None:\n feedback = Software.compare_device_vers_with_target_vers(self\n .sample_device.softwareVersion, target_version)\n if feedback == 0:\n self.update_task_state(new_task_state=c.TASK_STATE_DONE,\n task_state_message=logmsg.SW_DONE_SAME_VERS)\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.\n SW_INSTALLED_VERS.format(self.sample_device.\n softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_TARGET_VERS.\n format(target_version))\n Tools.emit_log(task_name=self.task_name, task_state={\n 'taskState': self.task_state, 'taskStateMsg':\n logmsg.SW_DONE_SAME_VERS}, sample_device=self.\n sample_device, grp_cfg=self.grp_cfg, shared=self.\n shared, scope=c.LOGGER_SCOPE_ALL, level=c.\n LOGGER_LEVEL_INFO, message=logmsg.\n SW_NO_UPDATE_NEEDED_SAME)\n elif feedback == 1:\n self.update_task_state(new_task_state=c.TASK_STATE_DONE,\n task_state_message=logmsg.SW_DONE_DEV_NEWER_VERS)\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.\n SW_INSTALLED_VERS.format(self.sample_device.\n softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_TARGET_VERS.\n format(target_version))\n Tools.emit_log(task_name=self.task_name, task_state={\n 'taskState': self.task_state, 'taskStateMsg':\n logmsg.SW_DONE_DEV_NEWER_VERS}, sample_device=self.\n sample_device, grp_cfg=self.grp_cfg, shared=self.\n shared, scope=c.LOGGER_SCOPE_ALL, level=c.\n LOGGER_LEVEL_INFO, message=logmsg.\n SW_NO_UPDATE_NEEDED_NEWER)\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.\n SW_INSTALLED_VERS.format(self.sample_device.\n softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, 
message=logmsg.SW_TARGET_VERS.\n format(target_version))\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_UPDATE_NEEDED\n .format(self.sample_device.softwareVersion,\n target_version))\n filename = Software.get_software_image_name(self.\n sample_device, target_version, grp_cfg=self.grp_cfg)\n if filename:\n full_path = (self.grp_cfg.TASKS.Provision.Software.\n ImageDir + filename)\n if self.sample_device.deviceConnection.connected:\n self.sample_device = self.install_device_software(\n full_path, filename, target_version)\n if self.sample_device is not None:\n if (self.task_state != c.TASK_STATE_FAILED and\n self.task_state != c.TASK_STATE_REBOOTING):\n if (self.sample_device.deviceConnection\n is not None):\n self.sample_device.deviceConnection.facts_refresh(\n keys='version')\n (self.sample_device.softwareVersion) = (\n self.sample_device.deviceConnection\n .facts['version'])\n self.update_task_state(new_task_state=c\n .TASK_STATE_DONE,\n task_state_message=c.\n TASK_STATE_MSG_DONE)\n Tools.emit_log(task_name=self.task_name,\n task_state={'taskState': self.\n task_state, 'taskStateMsg': c.\n TASK_STATE_MSG_DONE}, sample_device\n =self.sample_device, grp_cfg=self.\n grp_cfg, shared=self.shared, scope=\n c.LOGGER_SCOPE_ALL, level=c.\n LOGGER_LEVEL_INFO, message=logmsg.\n SW_NO_UPDATE_NEEDED_SAME)\n else:\n Tools.emit_log(task_name=self.task_name,\n sample_device=self.sample_device,\n message=logmsg.SW_CONN_NOK.format(\n self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c\n .TASK_STATE_FAILED,\n task_state_message=c.\n TASK_STATE_MSG_FAILED)\n return\n else:\n return\n else:\n Tools.emit_log(task_name=self.task_name,\n sample_device=self.sample_device,\n message=logmsg.SW_CONN_NOK.format(self.\n sample_device.deviceIP))\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=\n logmsg.SW_CONN_NOK.format(self.\n sample_device.deviceIP))\n else:\n Tools.emit_log(task_name=self.task_name,\n sample_device=self.sample_device, message=\n logmsg.SW_CONN_NOK.format(self.\n sample_device.deviceIP))\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=\n logmsg.SW_CONN_NOK.format(self.\n sample_device.deviceIP))\n else:\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=logmsg.\n SW_IMG_NOK.format(target_version))\n else:\n self.logger.info(Tools.create_log_msg(self.task_name, self.\n sample_device.deviceSerial, logmsg.\n SW_NO_TARGET_VERS_FOUND.format(self.sample_device.\n deviceModel)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_IMG_VALUE_NOK.format(self.\n sample_device.deviceGroup))\n\n def install_device_software(self, path, image, target_version):\n \"\"\"\n Call PyEz to install new JUNOS image to device\n :param sample_device:\n :param path:\n :param image:\n :param target_version\n :return:\n \"\"\"\n package = os.path.join(os.getcwd(), path)\n if c.SERVICEPLUGIN_OSSH in self.sample_device.deviceServicePlugin:\n try:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_CLEANUP_STORAGE)\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_CLEANUP_STORAGE)\n self.sample_device.deviceConnection.rpc.request_system_storage_cleanup(\n )\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_COPY_IMG.format(image))\n 
self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_COPY_IMG.format(image))\n with SCPClient(transport=self.sample_device.\n deviceConnection._conn._session.transport) as scp:\n scp.put(package, remote_path=self.grp_cfg.TASKS.\n Provision.Software.RemoteDir)\n except (BadHostKeyException, AuthenticationException) as e:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_COPY_IMG_NOK.format(e\n .message))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_COPY_IMG_NOK.format(e.message)\n )\n return self.sample_device\n try:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_VERS.format(\n target_version))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_INSTALL_VERS.format(\n target_version))\n result = self.sample_device.deviceConnection.sw.pkgadd(self\n .grp_cfg.TASKS.Provision.Software.RemoteDir + image,\n dev_timeout=self.grp_cfg.TASKS.Provision.Software.\n PkgAddDevTimeout)\n except Exception as err:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_NOK.format(\n str(err)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_INSTALL_NOK.format(str(err)))\n return self.sample_device\n if result is True:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_OK.format(\n self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_INSTALL_OK.format(self.\n sample_device.deviceIP))\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_NOK.format(\n str(result)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_INSTALL_NOK.format(str(\n result)))\n time.sleep(3)\n return self.sample_device\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_REBOOT.format(self.\n sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_REBOOT.format(self.\n sample_device.deviceIP))\n try:\n rsp = self.sample_device.deviceConnection.sw.reboot()\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_REBOOT_DEV_RESP.\n format(rsp.replace('\\n', ' ')))\n self.sample_device.deviceConnection.close()\n self.sample_device.deviceIsRebooted = True\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message='Rebooting...')\n c.oss_seen_devices_lck.acquire()\n try:\n if self.sample_device.deviceIP in c.oss_seen_devices:\n c.oss_seen_devices.pop(self.sample_device.deviceIP,\n None)\n finally:\n c.oss_seen_devices_lck.release()\n return self.sample_device\n except exception.ConnectClosedError:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_CONN_LOOSE_REBOOT)\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message=logmsg.\n SW_CONN_LOOSE_REBOOT)\n return self.sample_device\n else:\n try:\n result = self.sample_device.deviceConnection.sw.install(package\n =package, remote_path=self.grp_cfg.TASKS.Provision.\n Software.RemoteDir, cleanfs=True, no_copy=False,\n progress=SoftwareTask.install_progress)\n except Exception as err:\n 
Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_NOK.format(\n str(err)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=str(err))\n return self.sample_device\n if result is True:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_OK.format(\n self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_INSTALL_OK.format(self.\n sample_device.deviceIP))\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_NOK.format(\n str(result)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_INSTALL_NOK.format(str(\n result)))\n time.sleep(3)\n return self.sample_device\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_REBOOT.format(self.\n sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_REBOOT.format(self.\n sample_device.deviceIP))\n try:\n rsp = self.sample_device.deviceConnection.sw.reboot()\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_REBOOT_DEV_RESP.\n format(rsp.replace('\\n', ' ')))\n except exception.ConnectClosedError:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_CONN_LOOSE_REBOOT)\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message=logmsg.\n SW_CONN_LOOSE_REBOOT)\n finally:\n alive = self.probe_device_not_alive(self.sample_device,\n self.grp_cfg.TASKS.Provision.Software.RetryProbeCounter)\n if not alive:\n self.sample_device.deviceIsRebooted = True\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_PROBE_WAKEUP.\n format(self.sample_device.deviceIP))\n status, self.sample_device = Tools.create_dev_conn(self\n .sample_device, connect=False)\n if status:\n alive = self.probe_device_alive(self.sample_device,\n self.grp_cfg.TASKS.Provision.Software.\n RebootProbeTimeout)\n if alive:\n Tools.emit_log(task_name=self.task_name,\n sample_device=self.sample_device, message=\n logmsg.SW_PROBE_WAKUP_OK.format(self.\n sample_device.deviceIP))\n self.sample_device.deviceIsRebooted = False\n self.update_task_state(new_task_state=c.\n TASK_STATE_PROGRESS, task_state_message=\n logmsg.SW_PROBE_WAKUP_OK.format(self.\n sample_device.deviceIP))\n status, self.sample_device = Tools.create_dev_conn(\n self.sample_device)\n if status:\n self.sample_device.deviceConnection.bind(cu\n =Config, sw=SW)\n self.update_task_state(new_task_state=c.\n TASK_STATE_PROGRESS, task_state_message\n =logmsg.SW_CONN_OK.format(self.\n sample_device.deviceIP))\n return self.sample_device\n else:\n return self.sample_device\n else:\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=c.\n TASK_STATE_MSG_FAILED)\n self.sample_device.deviceConnection = None\n return self.sample_device\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_PROBE_DEV_NOK\n .format(self.sample_device.deviceIP, self.grp_cfg.\n TASKS.Provision.Software.RebootProbeCounter))\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=logmsg.\n SW_PROBE_DEV_NOK.format(self.sample_device.deviceIP,\n self.grp_cfg.TASKS.Provision.Software.\n 
RebootProbeCounter))\n\n def probe_device_alive(self, device, timeout):\n \"\"\"\n\n :param device:\n :param timeout:\n :return:\n \"\"\"\n alive = device.deviceConnection.probe(timeout=5)\n probe_attemps = (self.grp_cfg.TASKS.Provision.Software.\n RebootProbeCounter)\n probe_cntr = 0\n while not alive:\n if probe_cntr <= probe_attemps:\n alive = device.deviceConnection.probe(timeout)\n probe_cntr += 1\n Tools.emit_log(task_name=self.task_name, sample_device=\n device, message=logmsg.SW_PROBE_DEV.format(timeout))\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message=logmsg.\n SW_PROBE_WAIT_REBOOT.format(str(probe_cntr)))\n else:\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=c.TASK_STATE_FAILED)\n break\n return alive\n\n def probe_device_not_alive(self, device, timeout):\n \"\"\"\n\n :param device:\n :param timeout:\n :return:\n \"\"\"\n alive = device.deviceConnection.probe(timeout=5)\n probe_attemps = (self.grp_cfg.TASKS.Provision.Software.\n RebootProbeCounter)\n probe_cntr = 0\n while alive:\n if probe_cntr <= probe_attemps:\n alive = device.deviceConnection.probe(1)\n probe_cntr += 1\n Tools.emit_log(task_name=self.task_name, sample_device=\n device, message=logmsg.SW_PROBE_DEV.format(timeout))\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message=logmsg.\n SW_PROBE_WAIT_REBOOT.format(str(probe_cntr)))\n time.sleep(timeout)\n else:\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=c.TASK_STATE_MSG_FAILED)\n break\n return alive\n\n @staticmethod\n def install_progress(dev, report):\n c.logger.info('[{0:{1}}][{2:{3}}][{4}]'.format('SOFTWARE', c.\n FIRST_PAD, dev.facts['serialnumber'], c.SECOND_PAD, report))\n with SoftwareTask.sample_devices_lock:\n SoftwareTask.sample_devices[dev.facts['serialnumber']\n ].deviceTasks.taskState['Software'] = {'taskState': c.\n TASK_STATE_PROGRESS, 'taskStateMsg': report}\n\n @staticmethod\n def copy_progress(filename, size, sent):\n c.logger.info('PROVSW: Copy file <%s> progress <%s>', filename, \n sent / (1024 * 1024) * 100.0 / (size / (1024 * 1024)))\n\n def post_run_task(self):\n with SoftwareTask.sample_devices_lock:\n if self.sample_device.deviceSerial in SoftwareTask.sample_devices:\n del SoftwareTask.sample_devices[self.sample_device.deviceSerial\n ]\n",
"step-3": "<mask token>\n\n\nclass SoftwareTask(Task):\n CHECK_SCHEMA = True\n TASK_TYPE = c.TASK_TYPE_PROVISION\n TASK_VERSION = 1.0\n sample_devices = dict()\n sample_devices_lock = threading.Lock()\n\n def __init__(self, sample_device=None, shared=None):\n super(SoftwareTask, self).__init__(sample_device=sample_device,\n shared=shared)\n self.logger.debug(Tools.create_log_msg(self.task_name, self.\n sample_device.deviceSerial, LogCommon.IS_SUBCLASS.format(self.\n task_name, issubclass(SoftwareTask, Task))))\n\n def pre_run_task(self):\n pass\n\n def run_task(self):\n \"\"\"\n Provision device images\n\n :param sample_device: A device object for which the image provisioning should be done\n :return:\n \"\"\"\n target_version = getattr(self.grp_cfg.TASKS.Provision.Software.\n TargetVersion, self.sample_device.deviceModel, None)\n if self.sample_device.deviceStatus == c.DEVICE_STATUS_REBOOTED:\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_INSTALLED_VERS.format(self\n .sample_device.softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_TARGET_VERS.format(\n target_version))\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_NO_UPDATE_NEEDED_SAME)\n self.sample_device.deviceIsRebooted = False\n self.update_task_state(new_task_state=c.TASK_STATE_DONE,\n task_state_message=c.TASK_STATE_MSG_DONE)\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_START_UPDATE.format(self.\n sample_device.deviceSerial))\n SoftwareTask.sample_devices[self.sample_device.deviceSerial\n ] = self.sample_device\n if target_version is not None:\n feedback = Software.compare_device_vers_with_target_vers(self\n .sample_device.softwareVersion, target_version)\n if feedback == 0:\n self.update_task_state(new_task_state=c.TASK_STATE_DONE,\n task_state_message=logmsg.SW_DONE_SAME_VERS)\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.\n SW_INSTALLED_VERS.format(self.sample_device.\n softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_TARGET_VERS.\n format(target_version))\n Tools.emit_log(task_name=self.task_name, task_state={\n 'taskState': self.task_state, 'taskStateMsg':\n logmsg.SW_DONE_SAME_VERS}, sample_device=self.\n sample_device, grp_cfg=self.grp_cfg, shared=self.\n shared, scope=c.LOGGER_SCOPE_ALL, level=c.\n LOGGER_LEVEL_INFO, message=logmsg.\n SW_NO_UPDATE_NEEDED_SAME)\n elif feedback == 1:\n self.update_task_state(new_task_state=c.TASK_STATE_DONE,\n task_state_message=logmsg.SW_DONE_DEV_NEWER_VERS)\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.\n SW_INSTALLED_VERS.format(self.sample_device.\n softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_TARGET_VERS.\n format(target_version))\n Tools.emit_log(task_name=self.task_name, task_state={\n 'taskState': self.task_state, 'taskStateMsg':\n logmsg.SW_DONE_DEV_NEWER_VERS}, sample_device=self.\n sample_device, grp_cfg=self.grp_cfg, shared=self.\n shared, scope=c.LOGGER_SCOPE_ALL, level=c.\n LOGGER_LEVEL_INFO, message=logmsg.\n SW_NO_UPDATE_NEEDED_NEWER)\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.\n SW_INSTALLED_VERS.format(self.sample_device.\n softwareVersion))\n 
Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_TARGET_VERS.\n format(target_version))\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_UPDATE_NEEDED\n .format(self.sample_device.softwareVersion,\n target_version))\n filename = Software.get_software_image_name(self.\n sample_device, target_version, grp_cfg=self.grp_cfg)\n if filename:\n full_path = (self.grp_cfg.TASKS.Provision.Software.\n ImageDir + filename)\n if self.sample_device.deviceConnection.connected:\n self.sample_device = self.install_device_software(\n full_path, filename, target_version)\n if self.sample_device is not None:\n if (self.task_state != c.TASK_STATE_FAILED and\n self.task_state != c.TASK_STATE_REBOOTING):\n if (self.sample_device.deviceConnection\n is not None):\n self.sample_device.deviceConnection.facts_refresh(\n keys='version')\n (self.sample_device.softwareVersion) = (\n self.sample_device.deviceConnection\n .facts['version'])\n self.update_task_state(new_task_state=c\n .TASK_STATE_DONE,\n task_state_message=c.\n TASK_STATE_MSG_DONE)\n Tools.emit_log(task_name=self.task_name,\n task_state={'taskState': self.\n task_state, 'taskStateMsg': c.\n TASK_STATE_MSG_DONE}, sample_device\n =self.sample_device, grp_cfg=self.\n grp_cfg, shared=self.shared, scope=\n c.LOGGER_SCOPE_ALL, level=c.\n LOGGER_LEVEL_INFO, message=logmsg.\n SW_NO_UPDATE_NEEDED_SAME)\n else:\n Tools.emit_log(task_name=self.task_name,\n sample_device=self.sample_device,\n message=logmsg.SW_CONN_NOK.format(\n self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c\n .TASK_STATE_FAILED,\n task_state_message=c.\n TASK_STATE_MSG_FAILED)\n return\n else:\n return\n else:\n Tools.emit_log(task_name=self.task_name,\n sample_device=self.sample_device,\n message=logmsg.SW_CONN_NOK.format(self.\n sample_device.deviceIP))\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=\n logmsg.SW_CONN_NOK.format(self.\n sample_device.deviceIP))\n else:\n Tools.emit_log(task_name=self.task_name,\n sample_device=self.sample_device, message=\n logmsg.SW_CONN_NOK.format(self.\n sample_device.deviceIP))\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=\n logmsg.SW_CONN_NOK.format(self.\n sample_device.deviceIP))\n else:\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=logmsg.\n SW_IMG_NOK.format(target_version))\n else:\n self.logger.info(Tools.create_log_msg(self.task_name, self.\n sample_device.deviceSerial, logmsg.\n SW_NO_TARGET_VERS_FOUND.format(self.sample_device.\n deviceModel)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_IMG_VALUE_NOK.format(self.\n sample_device.deviceGroup))\n\n def install_device_software(self, path, image, target_version):\n \"\"\"\n Call PyEz to install new JUNOS image to device\n :param sample_device:\n :param path:\n :param image:\n :param target_version\n :return:\n \"\"\"\n package = os.path.join(os.getcwd(), path)\n if c.SERVICEPLUGIN_OSSH in self.sample_device.deviceServicePlugin:\n try:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_CLEANUP_STORAGE)\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_CLEANUP_STORAGE)\n self.sample_device.deviceConnection.rpc.request_system_storage_cleanup(\n )\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, 
message=logmsg.SW_COPY_IMG.format(image))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_COPY_IMG.format(image))\n with SCPClient(transport=self.sample_device.\n deviceConnection._conn._session.transport) as scp:\n scp.put(package, remote_path=self.grp_cfg.TASKS.\n Provision.Software.RemoteDir)\n except (BadHostKeyException, AuthenticationException) as e:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_COPY_IMG_NOK.format(e\n .message))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_COPY_IMG_NOK.format(e.message)\n )\n return self.sample_device\n try:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_VERS.format(\n target_version))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_INSTALL_VERS.format(\n target_version))\n result = self.sample_device.deviceConnection.sw.pkgadd(self\n .grp_cfg.TASKS.Provision.Software.RemoteDir + image,\n dev_timeout=self.grp_cfg.TASKS.Provision.Software.\n PkgAddDevTimeout)\n except Exception as err:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_NOK.format(\n str(err)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_INSTALL_NOK.format(str(err)))\n return self.sample_device\n if result is True:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_OK.format(\n self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_INSTALL_OK.format(self.\n sample_device.deviceIP))\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_NOK.format(\n str(result)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_INSTALL_NOK.format(str(\n result)))\n time.sleep(3)\n return self.sample_device\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_REBOOT.format(self.\n sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_REBOOT.format(self.\n sample_device.deviceIP))\n try:\n rsp = self.sample_device.deviceConnection.sw.reboot()\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_REBOOT_DEV_RESP.\n format(rsp.replace('\\n', ' ')))\n self.sample_device.deviceConnection.close()\n self.sample_device.deviceIsRebooted = True\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message='Rebooting...')\n c.oss_seen_devices_lck.acquire()\n try:\n if self.sample_device.deviceIP in c.oss_seen_devices:\n c.oss_seen_devices.pop(self.sample_device.deviceIP,\n None)\n finally:\n c.oss_seen_devices_lck.release()\n return self.sample_device\n except exception.ConnectClosedError:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_CONN_LOOSE_REBOOT)\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message=logmsg.\n SW_CONN_LOOSE_REBOOT)\n return self.sample_device\n else:\n try:\n result = self.sample_device.deviceConnection.sw.install(package\n =package, remote_path=self.grp_cfg.TASKS.Provision.\n Software.RemoteDir, cleanfs=True, no_copy=False,\n 
progress=SoftwareTask.install_progress)\n except Exception as err:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_NOK.format(\n str(err)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=str(err))\n return self.sample_device\n if result is True:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_OK.format(\n self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_INSTALL_OK.format(self.\n sample_device.deviceIP))\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_NOK.format(\n str(result)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_INSTALL_NOK.format(str(\n result)))\n time.sleep(3)\n return self.sample_device\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_REBOOT.format(self.\n sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_REBOOT.format(self.\n sample_device.deviceIP))\n try:\n rsp = self.sample_device.deviceConnection.sw.reboot()\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_REBOOT_DEV_RESP.\n format(rsp.replace('\\n', ' ')))\n except exception.ConnectClosedError:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_CONN_LOOSE_REBOOT)\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message=logmsg.\n SW_CONN_LOOSE_REBOOT)\n finally:\n alive = self.probe_device_not_alive(self.sample_device,\n self.grp_cfg.TASKS.Provision.Software.RetryProbeCounter)\n if not alive:\n self.sample_device.deviceIsRebooted = True\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_PROBE_WAKEUP.\n format(self.sample_device.deviceIP))\n status, self.sample_device = Tools.create_dev_conn(self\n .sample_device, connect=False)\n if status:\n alive = self.probe_device_alive(self.sample_device,\n self.grp_cfg.TASKS.Provision.Software.\n RebootProbeTimeout)\n if alive:\n Tools.emit_log(task_name=self.task_name,\n sample_device=self.sample_device, message=\n logmsg.SW_PROBE_WAKUP_OK.format(self.\n sample_device.deviceIP))\n self.sample_device.deviceIsRebooted = False\n self.update_task_state(new_task_state=c.\n TASK_STATE_PROGRESS, task_state_message=\n logmsg.SW_PROBE_WAKUP_OK.format(self.\n sample_device.deviceIP))\n status, self.sample_device = Tools.create_dev_conn(\n self.sample_device)\n if status:\n self.sample_device.deviceConnection.bind(cu\n =Config, sw=SW)\n self.update_task_state(new_task_state=c.\n TASK_STATE_PROGRESS, task_state_message\n =logmsg.SW_CONN_OK.format(self.\n sample_device.deviceIP))\n return self.sample_device\n else:\n return self.sample_device\n else:\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=c.\n TASK_STATE_MSG_FAILED)\n self.sample_device.deviceConnection = None\n return self.sample_device\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_PROBE_DEV_NOK\n .format(self.sample_device.deviceIP, self.grp_cfg.\n TASKS.Provision.Software.RebootProbeCounter))\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=logmsg.\n 
SW_PROBE_DEV_NOK.format(self.sample_device.deviceIP,\n self.grp_cfg.TASKS.Provision.Software.\n RebootProbeCounter))\n\n def probe_device_alive(self, device, timeout):\n \"\"\"\n\n :param device:\n :param timeout:\n :return:\n \"\"\"\n alive = device.deviceConnection.probe(timeout=5)\n probe_attemps = (self.grp_cfg.TASKS.Provision.Software.\n RebootProbeCounter)\n probe_cntr = 0\n while not alive:\n if probe_cntr <= probe_attemps:\n alive = device.deviceConnection.probe(timeout)\n probe_cntr += 1\n Tools.emit_log(task_name=self.task_name, sample_device=\n device, message=logmsg.SW_PROBE_DEV.format(timeout))\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message=logmsg.\n SW_PROBE_WAIT_REBOOT.format(str(probe_cntr)))\n else:\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=c.TASK_STATE_FAILED)\n break\n return alive\n\n def probe_device_not_alive(self, device, timeout):\n \"\"\"\n\n :param device:\n :param timeout:\n :return:\n \"\"\"\n alive = device.deviceConnection.probe(timeout=5)\n probe_attemps = (self.grp_cfg.TASKS.Provision.Software.\n RebootProbeCounter)\n probe_cntr = 0\n while alive:\n if probe_cntr <= probe_attemps:\n alive = device.deviceConnection.probe(1)\n probe_cntr += 1\n Tools.emit_log(task_name=self.task_name, sample_device=\n device, message=logmsg.SW_PROBE_DEV.format(timeout))\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message=logmsg.\n SW_PROBE_WAIT_REBOOT.format(str(probe_cntr)))\n time.sleep(timeout)\n else:\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=c.TASK_STATE_MSG_FAILED)\n break\n return alive\n\n @staticmethod\n def install_progress(dev, report):\n c.logger.info('[{0:{1}}][{2:{3}}][{4}]'.format('SOFTWARE', c.\n FIRST_PAD, dev.facts['serialnumber'], c.SECOND_PAD, report))\n with SoftwareTask.sample_devices_lock:\n SoftwareTask.sample_devices[dev.facts['serialnumber']\n ].deviceTasks.taskState['Software'] = {'taskState': c.\n TASK_STATE_PROGRESS, 'taskStateMsg': report}\n\n @staticmethod\n def copy_progress(filename, size, sent):\n c.logger.info('PROVSW: Copy file <%s> progress <%s>', filename, \n sent / (1024 * 1024) * 100.0 / (size / (1024 * 1024)))\n\n def post_run_task(self):\n with SoftwareTask.sample_devices_lock:\n if self.sample_device.deviceSerial in SoftwareTask.sample_devices:\n del SoftwareTask.sample_devices[self.sample_device.deviceSerial\n ]\n",
"step-4": "import os\nimport threading\nimport time\nfrom jnpr.junos import Device\nfrom jnpr.junos import exception\nfrom jnpr.junos.utils.config import Config\nfrom jnpr.junos.utils.sw import SW\nfrom paramiko import BadHostKeyException, AuthenticationException\nfrom scp import SCPClient\nimport lib.constants as c\nfrom lib.logmsg import LogCommon\nfrom lib.logmsg import LogSoftwareTask as logmsg\nfrom lib.tasks.task import Task\nfrom lib.tasks.tasktools import Software\nfrom lib.tools import Tools\n\n\nclass SoftwareTask(Task):\n CHECK_SCHEMA = True\n TASK_TYPE = c.TASK_TYPE_PROVISION\n TASK_VERSION = 1.0\n sample_devices = dict()\n sample_devices_lock = threading.Lock()\n\n def __init__(self, sample_device=None, shared=None):\n super(SoftwareTask, self).__init__(sample_device=sample_device,\n shared=shared)\n self.logger.debug(Tools.create_log_msg(self.task_name, self.\n sample_device.deviceSerial, LogCommon.IS_SUBCLASS.format(self.\n task_name, issubclass(SoftwareTask, Task))))\n\n def pre_run_task(self):\n pass\n\n def run_task(self):\n \"\"\"\n Provision device images\n\n :param sample_device: A device object for which the image provisioning should be done\n :return:\n \"\"\"\n target_version = getattr(self.grp_cfg.TASKS.Provision.Software.\n TargetVersion, self.sample_device.deviceModel, None)\n if self.sample_device.deviceStatus == c.DEVICE_STATUS_REBOOTED:\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_INSTALLED_VERS.format(self\n .sample_device.softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_TARGET_VERS.format(\n target_version))\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_NO_UPDATE_NEEDED_SAME)\n self.sample_device.deviceIsRebooted = False\n self.update_task_state(new_task_state=c.TASK_STATE_DONE,\n task_state_message=c.TASK_STATE_MSG_DONE)\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_START_UPDATE.format(self.\n sample_device.deviceSerial))\n SoftwareTask.sample_devices[self.sample_device.deviceSerial\n ] = self.sample_device\n if target_version is not None:\n feedback = Software.compare_device_vers_with_target_vers(self\n .sample_device.softwareVersion, target_version)\n if feedback == 0:\n self.update_task_state(new_task_state=c.TASK_STATE_DONE,\n task_state_message=logmsg.SW_DONE_SAME_VERS)\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.\n SW_INSTALLED_VERS.format(self.sample_device.\n softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_TARGET_VERS.\n format(target_version))\n Tools.emit_log(task_name=self.task_name, task_state={\n 'taskState': self.task_state, 'taskStateMsg':\n logmsg.SW_DONE_SAME_VERS}, sample_device=self.\n sample_device, grp_cfg=self.grp_cfg, shared=self.\n shared, scope=c.LOGGER_SCOPE_ALL, level=c.\n LOGGER_LEVEL_INFO, message=logmsg.\n SW_NO_UPDATE_NEEDED_SAME)\n elif feedback == 1:\n self.update_task_state(new_task_state=c.TASK_STATE_DONE,\n task_state_message=logmsg.SW_DONE_DEV_NEWER_VERS)\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.\n SW_INSTALLED_VERS.format(self.sample_device.\n softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_TARGET_VERS.\n format(target_version))\n 
Tools.emit_log(task_name=self.task_name, task_state={\n 'taskState': self.task_state, 'taskStateMsg':\n logmsg.SW_DONE_DEV_NEWER_VERS}, sample_device=self.\n sample_device, grp_cfg=self.grp_cfg, shared=self.\n shared, scope=c.LOGGER_SCOPE_ALL, level=c.\n LOGGER_LEVEL_INFO, message=logmsg.\n SW_NO_UPDATE_NEEDED_NEWER)\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.\n SW_INSTALLED_VERS.format(self.sample_device.\n softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_TARGET_VERS.\n format(target_version))\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_UPDATE_NEEDED\n .format(self.sample_device.softwareVersion,\n target_version))\n filename = Software.get_software_image_name(self.\n sample_device, target_version, grp_cfg=self.grp_cfg)\n if filename:\n full_path = (self.grp_cfg.TASKS.Provision.Software.\n ImageDir + filename)\n if self.sample_device.deviceConnection.connected:\n self.sample_device = self.install_device_software(\n full_path, filename, target_version)\n if self.sample_device is not None:\n if (self.task_state != c.TASK_STATE_FAILED and\n self.task_state != c.TASK_STATE_REBOOTING):\n if (self.sample_device.deviceConnection\n is not None):\n self.sample_device.deviceConnection.facts_refresh(\n keys='version')\n (self.sample_device.softwareVersion) = (\n self.sample_device.deviceConnection\n .facts['version'])\n self.update_task_state(new_task_state=c\n .TASK_STATE_DONE,\n task_state_message=c.\n TASK_STATE_MSG_DONE)\n Tools.emit_log(task_name=self.task_name,\n task_state={'taskState': self.\n task_state, 'taskStateMsg': c.\n TASK_STATE_MSG_DONE}, sample_device\n =self.sample_device, grp_cfg=self.\n grp_cfg, shared=self.shared, scope=\n c.LOGGER_SCOPE_ALL, level=c.\n LOGGER_LEVEL_INFO, message=logmsg.\n SW_NO_UPDATE_NEEDED_SAME)\n else:\n Tools.emit_log(task_name=self.task_name,\n sample_device=self.sample_device,\n message=logmsg.SW_CONN_NOK.format(\n self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c\n .TASK_STATE_FAILED,\n task_state_message=c.\n TASK_STATE_MSG_FAILED)\n return\n else:\n return\n else:\n Tools.emit_log(task_name=self.task_name,\n sample_device=self.sample_device,\n message=logmsg.SW_CONN_NOK.format(self.\n sample_device.deviceIP))\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=\n logmsg.SW_CONN_NOK.format(self.\n sample_device.deviceIP))\n else:\n Tools.emit_log(task_name=self.task_name,\n sample_device=self.sample_device, message=\n logmsg.SW_CONN_NOK.format(self.\n sample_device.deviceIP))\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=\n logmsg.SW_CONN_NOK.format(self.\n sample_device.deviceIP))\n else:\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=logmsg.\n SW_IMG_NOK.format(target_version))\n else:\n self.logger.info(Tools.create_log_msg(self.task_name, self.\n sample_device.deviceSerial, logmsg.\n SW_NO_TARGET_VERS_FOUND.format(self.sample_device.\n deviceModel)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_IMG_VALUE_NOK.format(self.\n sample_device.deviceGroup))\n\n def install_device_software(self, path, image, target_version):\n \"\"\"\n Call PyEz to install new JUNOS image to device\n :param sample_device:\n :param path:\n :param image:\n :param target_version\n :return:\n \"\"\"\n package = 
os.path.join(os.getcwd(), path)\n if c.SERVICEPLUGIN_OSSH in self.sample_device.deviceServicePlugin:\n try:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_CLEANUP_STORAGE)\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_CLEANUP_STORAGE)\n self.sample_device.deviceConnection.rpc.request_system_storage_cleanup(\n )\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_COPY_IMG.format(image))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_COPY_IMG.format(image))\n with SCPClient(transport=self.sample_device.\n deviceConnection._conn._session.transport) as scp:\n scp.put(package, remote_path=self.grp_cfg.TASKS.\n Provision.Software.RemoteDir)\n except (BadHostKeyException, AuthenticationException) as e:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_COPY_IMG_NOK.format(e\n .message))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_COPY_IMG_NOK.format(e.message)\n )\n return self.sample_device\n try:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_VERS.format(\n target_version))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_INSTALL_VERS.format(\n target_version))\n result = self.sample_device.deviceConnection.sw.pkgadd(self\n .grp_cfg.TASKS.Provision.Software.RemoteDir + image,\n dev_timeout=self.grp_cfg.TASKS.Provision.Software.\n PkgAddDevTimeout)\n except Exception as err:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_NOK.format(\n str(err)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_INSTALL_NOK.format(str(err)))\n return self.sample_device\n if result is True:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_OK.format(\n self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_INSTALL_OK.format(self.\n sample_device.deviceIP))\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_NOK.format(\n str(result)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_INSTALL_NOK.format(str(\n result)))\n time.sleep(3)\n return self.sample_device\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_REBOOT.format(self.\n sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_REBOOT.format(self.\n sample_device.deviceIP))\n try:\n rsp = self.sample_device.deviceConnection.sw.reboot()\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_REBOOT_DEV_RESP.\n format(rsp.replace('\\n', ' ')))\n self.sample_device.deviceConnection.close()\n self.sample_device.deviceIsRebooted = True\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message='Rebooting...')\n c.oss_seen_devices_lck.acquire()\n try:\n if self.sample_device.deviceIP in c.oss_seen_devices:\n c.oss_seen_devices.pop(self.sample_device.deviceIP,\n None)\n finally:\n c.oss_seen_devices_lck.release()\n return self.sample_device\n except 
exception.ConnectClosedError:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_CONN_LOOSE_REBOOT)\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message=logmsg.\n SW_CONN_LOOSE_REBOOT)\n return self.sample_device\n else:\n try:\n result = self.sample_device.deviceConnection.sw.install(package\n =package, remote_path=self.grp_cfg.TASKS.Provision.\n Software.RemoteDir, cleanfs=True, no_copy=False,\n progress=SoftwareTask.install_progress)\n except Exception as err:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_NOK.format(\n str(err)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=str(err))\n return self.sample_device\n if result is True:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_OK.format(\n self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_INSTALL_OK.format(self.\n sample_device.deviceIP))\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_INSTALL_NOK.format(\n str(result)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_INSTALL_NOK.format(str(\n result)))\n time.sleep(3)\n return self.sample_device\n Tools.emit_log(task_name=self.task_name, sample_device=self.\n sample_device, message=logmsg.SW_REBOOT.format(self.\n sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_REBOOT.format(self.\n sample_device.deviceIP))\n try:\n rsp = self.sample_device.deviceConnection.sw.reboot()\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_REBOOT_DEV_RESP.\n format(rsp.replace('\\n', ' ')))\n except exception.ConnectClosedError:\n Tools.emit_log(task_name=self.task_name, sample_device=self\n .sample_device, message=logmsg.SW_CONN_LOOSE_REBOOT)\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message=logmsg.\n SW_CONN_LOOSE_REBOOT)\n finally:\n alive = self.probe_device_not_alive(self.sample_device,\n self.grp_cfg.TASKS.Provision.Software.RetryProbeCounter)\n if not alive:\n self.sample_device.deviceIsRebooted = True\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_PROBE_WAKEUP.\n format(self.sample_device.deviceIP))\n status, self.sample_device = Tools.create_dev_conn(self\n .sample_device, connect=False)\n if status:\n alive = self.probe_device_alive(self.sample_device,\n self.grp_cfg.TASKS.Provision.Software.\n RebootProbeTimeout)\n if alive:\n Tools.emit_log(task_name=self.task_name,\n sample_device=self.sample_device, message=\n logmsg.SW_PROBE_WAKUP_OK.format(self.\n sample_device.deviceIP))\n self.sample_device.deviceIsRebooted = False\n self.update_task_state(new_task_state=c.\n TASK_STATE_PROGRESS, task_state_message=\n logmsg.SW_PROBE_WAKUP_OK.format(self.\n sample_device.deviceIP))\n status, self.sample_device = Tools.create_dev_conn(\n self.sample_device)\n if status:\n self.sample_device.deviceConnection.bind(cu\n =Config, sw=SW)\n self.update_task_state(new_task_state=c.\n TASK_STATE_PROGRESS, task_state_message\n =logmsg.SW_CONN_OK.format(self.\n sample_device.deviceIP))\n return self.sample_device\n else:\n return self.sample_device\n else:\n self.update_task_state(new_task_state=c.\n 
TASK_STATE_FAILED, task_state_message=c.\n TASK_STATE_MSG_FAILED)\n self.sample_device.deviceConnection = None\n return self.sample_device\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=\n self.sample_device, message=logmsg.SW_PROBE_DEV_NOK\n .format(self.sample_device.deviceIP, self.grp_cfg.\n TASKS.Provision.Software.RebootProbeCounter))\n self.update_task_state(new_task_state=c.\n TASK_STATE_FAILED, task_state_message=logmsg.\n SW_PROBE_DEV_NOK.format(self.sample_device.deviceIP,\n self.grp_cfg.TASKS.Provision.Software.\n RebootProbeCounter))\n\n def probe_device_alive(self, device, timeout):\n \"\"\"\n\n :param device:\n :param timeout:\n :return:\n \"\"\"\n alive = device.deviceConnection.probe(timeout=5)\n probe_attemps = (self.grp_cfg.TASKS.Provision.Software.\n RebootProbeCounter)\n probe_cntr = 0\n while not alive:\n if probe_cntr <= probe_attemps:\n alive = device.deviceConnection.probe(timeout)\n probe_cntr += 1\n Tools.emit_log(task_name=self.task_name, sample_device=\n device, message=logmsg.SW_PROBE_DEV.format(timeout))\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message=logmsg.\n SW_PROBE_WAIT_REBOOT.format(str(probe_cntr)))\n else:\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=c.TASK_STATE_FAILED)\n break\n return alive\n\n def probe_device_not_alive(self, device, timeout):\n \"\"\"\n\n :param device:\n :param timeout:\n :return:\n \"\"\"\n alive = device.deviceConnection.probe(timeout=5)\n probe_attemps = (self.grp_cfg.TASKS.Provision.Software.\n RebootProbeCounter)\n probe_cntr = 0\n while alive:\n if probe_cntr <= probe_attemps:\n alive = device.deviceConnection.probe(1)\n probe_cntr += 1\n Tools.emit_log(task_name=self.task_name, sample_device=\n device, message=logmsg.SW_PROBE_DEV.format(timeout))\n self.update_task_state(new_task_state=c.\n TASK_STATE_REBOOTING, task_state_message=logmsg.\n SW_PROBE_WAIT_REBOOT.format(str(probe_cntr)))\n time.sleep(timeout)\n else:\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=c.TASK_STATE_MSG_FAILED)\n break\n return alive\n\n @staticmethod\n def install_progress(dev, report):\n c.logger.info('[{0:{1}}][{2:{3}}][{4}]'.format('SOFTWARE', c.\n FIRST_PAD, dev.facts['serialnumber'], c.SECOND_PAD, report))\n with SoftwareTask.sample_devices_lock:\n SoftwareTask.sample_devices[dev.facts['serialnumber']\n ].deviceTasks.taskState['Software'] = {'taskState': c.\n TASK_STATE_PROGRESS, 'taskStateMsg': report}\n\n @staticmethod\n def copy_progress(filename, size, sent):\n c.logger.info('PROVSW: Copy file <%s> progress <%s>', filename, \n sent / (1024 * 1024) * 100.0 / (size / (1024 * 1024)))\n\n def post_run_task(self):\n with SoftwareTask.sample_devices_lock:\n if self.sample_device.deviceSerial in SoftwareTask.sample_devices:\n del SoftwareTask.sample_devices[self.sample_device.deviceSerial\n ]\n",
"step-5": "# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER\n# Copyright (c) 2018 Juniper Networks, Inc.\n# All rights reserved.\n# Use is subject to license terms.\n#\n# Author: cklewar\n\nimport os\nimport threading\nimport time\n\nfrom jnpr.junos import Device\nfrom jnpr.junos import exception\nfrom jnpr.junos.utils.config import Config\nfrom jnpr.junos.utils.sw import SW\nfrom paramiko import BadHostKeyException, AuthenticationException\nfrom scp import SCPClient\n\nimport lib.constants as c\nfrom lib.logmsg import LogCommon\nfrom lib.logmsg import LogSoftwareTask as logmsg\nfrom lib.tasks.task import Task\nfrom lib.tasks.tasktools import Software\nfrom lib.tools import Tools\n\n\nclass SoftwareTask(Task):\n CHECK_SCHEMA = True\n TASK_TYPE = c.TASK_TYPE_PROVISION\n TASK_VERSION = 1.0\n\n sample_devices = dict()\n sample_devices_lock = threading.Lock()\n\n def __init__(self, sample_device=None, shared=None):\n\n super(SoftwareTask, self).__init__(sample_device=sample_device, shared=shared)\n self.logger.debug(Tools.create_log_msg(self.task_name, self.sample_device.deviceSerial,\n LogCommon.IS_SUBCLASS.format(self.task_name,\n issubclass(SoftwareTask, Task))))\n\n def pre_run_task(self):\n pass\n\n def run_task(self):\n \"\"\"\n Provision device images\n\n :param sample_device: A device object for which the image provisioning should be done\n :return:\n \"\"\"\n\n target_version = getattr(self.grp_cfg.TASKS.Provision.Software.TargetVersion, self.sample_device.deviceModel,\n None)\n\n if self.sample_device.deviceStatus == c.DEVICE_STATUS_REBOOTED:\n\n # Device has been rebooted do not update again\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_INSTALLED_VERS.format(self.sample_device.softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_TARGET_VERS.format(target_version))\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_NO_UPDATE_NEEDED_SAME)\n self.sample_device.deviceIsRebooted = False\n self.update_task_state(new_task_state=c.TASK_STATE_DONE, task_state_message=c.TASK_STATE_MSG_DONE)\n\n else:\n\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_START_UPDATE.format(self.sample_device.deviceSerial))\n SoftwareTask.sample_devices[self.sample_device.deviceSerial] = self.sample_device\n\n if target_version is not None:\n feedback = Software.compare_device_vers_with_target_vers(self.sample_device.softwareVersion,\n target_version)\n\n if feedback == 0:\n self.update_task_state(new_task_state=c.TASK_STATE_DONE,\n task_state_message=logmsg.SW_DONE_SAME_VERS)\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_INSTALLED_VERS.format(\n self.sample_device.softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_TARGET_VERS.format(target_version))\n Tools.emit_log(task_name=self.task_name,\n task_state={'taskState': self.task_state, 'taskStateMsg': logmsg.SW_DONE_SAME_VERS},\n sample_device=self.sample_device, grp_cfg=self.grp_cfg, shared=self.shared,\n scope=c.LOGGER_SCOPE_ALL, level=c.LOGGER_LEVEL_INFO,\n message=logmsg.SW_NO_UPDATE_NEEDED_SAME)\n\n elif feedback == 1:\n self.update_task_state(new_task_state=c.TASK_STATE_DONE,\n task_state_message=logmsg.SW_DONE_DEV_NEWER_VERS)\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n 
message=logmsg.SW_INSTALLED_VERS.format(\n self.sample_device.softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_TARGET_VERS.format(target_version))\n Tools.emit_log(task_name=self.task_name,\n task_state={'taskState': self.task_state,\n 'taskStateMsg': logmsg.SW_DONE_DEV_NEWER_VERS},\n sample_device=self.sample_device, grp_cfg=self.grp_cfg, shared=self.shared,\n scope=c.LOGGER_SCOPE_ALL, level=c.LOGGER_LEVEL_INFO,\n message=logmsg.SW_NO_UPDATE_NEEDED_NEWER)\n\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_INSTALLED_VERS.format(\n self.sample_device.softwareVersion))\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_TARGET_VERS.format(target_version))\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_UPDATE_NEEDED.format(\n self.sample_device.softwareVersion, target_version))\n filename = Software.get_software_image_name(self.sample_device, target_version,\n grp_cfg=self.grp_cfg)\n\n if filename:\n\n full_path = self.grp_cfg.TASKS.Provision.Software.ImageDir + filename\n\n if self.sample_device.deviceConnection.connected:\n\n self.sample_device = self.install_device_software(full_path, filename, target_version)\n\n if self.sample_device is not None:\n\n if self.task_state != c.TASK_STATE_FAILED and self.task_state != c.TASK_STATE_REBOOTING:\n\n if self.sample_device.deviceConnection is not None:\n\n self.sample_device.deviceConnection.facts_refresh(keys='version')\n self.sample_device.softwareVersion = self.sample_device.deviceConnection.facts[\n \"version\"]\n self.update_task_state(new_task_state=c.TASK_STATE_DONE,\n task_state_message=c.TASK_STATE_MSG_DONE)\n Tools.emit_log(task_name=self.task_name,\n task_state={'taskState': self.task_state,\n 'taskStateMsg': c.TASK_STATE_MSG_DONE},\n sample_device=self.sample_device, grp_cfg=self.grp_cfg,\n shared=self.shared,\n scope=c.LOGGER_SCOPE_ALL, level=c.LOGGER_LEVEL_INFO,\n message=logmsg.SW_NO_UPDATE_NEEDED_SAME)\n else:\n\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_CONN_NOK.format(self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=c.TASK_STATE_MSG_FAILED)\n return\n\n else:\n return\n\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_CONN_NOK.format(self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_CONN_NOK.format(\n self.sample_device.deviceIP))\n\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_CONN_NOK.format(self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_CONN_NOK.format(\n self.sample_device.deviceIP))\n else:\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_IMG_NOK.format(target_version))\n else:\n self.logger.info(Tools.create_log_msg(self.task_name, self.sample_device.deviceSerial,\n logmsg.SW_NO_TARGET_VERS_FOUND.format(\n self.sample_device.deviceModel)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_IMG_VALUE_NOK.format(\n self.sample_device.deviceGroup))\n\n def install_device_software(self, path, image, target_version):\n \"\"\"\n Call PyEz to install new JUNOS 
image to device\n :param sample_device:\n :param path:\n :param image:\n :param target_version\n :return:\n \"\"\"\n\n package = os.path.join(os.getcwd(), path)\n\n if c.SERVICEPLUGIN_OSSH in self.sample_device.deviceServicePlugin:\n\n try:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_CLEANUP_STORAGE)\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_CLEANUP_STORAGE)\n self.sample_device.deviceConnection.rpc.request_system_storage_cleanup()\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_COPY_IMG.format(image))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_COPY_IMG.format(image))\n # progress = SoftwareTask.copy_progress\n with SCPClient(transport=self.sample_device.deviceConnection._conn._session.transport) as scp:\n scp.put(package, remote_path=self.grp_cfg.TASKS.Provision.Software.RemoteDir)\n\n except (BadHostKeyException, AuthenticationException) as e:\n\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_COPY_IMG_NOK.format(e.message))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_COPY_IMG_NOK.format(e.message))\n return self.sample_device\n\n try:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_INSTALL_VERS.format(target_version))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_INSTALL_VERS.format(target_version))\n result = self.sample_device.deviceConnection.sw.pkgadd(\n self.grp_cfg.TASKS.Provision.Software.RemoteDir + image,\n dev_timeout=self.grp_cfg.TASKS.Provision.Software.PkgAddDevTimeout)\n\n except Exception as err:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_INSTALL_NOK.format(str(err)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_INSTALL_NOK.format(str(err)))\n return self.sample_device\n\n if result is True:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_INSTALL_OK.format(self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_INSTALL_OK.format(self.sample_device.deviceIP))\n\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_INSTALL_NOK.format(str(result)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_INSTALL_NOK.format(str(result)))\n time.sleep(3)\n return self.sample_device\n\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_REBOOT.format(self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_REBOOT.format(self.sample_device.deviceIP))\n\n try:\n rsp = self.sample_device.deviceConnection.sw.reboot()\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_REBOOT_DEV_RESP.format(rsp.replace('\\n', \" \")))\n self.sample_device.deviceConnection.close()\n self.sample_device.deviceIsRebooted = True\n self.update_task_state(new_task_state=c.TASK_STATE_REBOOTING,\n task_state_message='Rebooting...')\n c.oss_seen_devices_lck.acquire()\n\n try:\n if self.sample_device.deviceIP in c.oss_seen_devices:\n 
c.oss_seen_devices.pop(self.sample_device.deviceIP, None)\n finally:\n c.oss_seen_devices_lck.release()\n\n return self.sample_device\n\n except exception.ConnectClosedError:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_CONN_LOOSE_REBOOT)\n self.update_task_state(new_task_state=c.TASK_STATE_REBOOTING,\n task_state_message=logmsg.SW_CONN_LOOSE_REBOOT)\n return self.sample_device\n\n else:\n\n try:\n result = self.sample_device.deviceConnection.sw.install(package=package,\n remote_path=self.grp_cfg.TASKS.Provision.Software.RemoteDir,\n cleanfs=True, no_copy=False,\n progress=SoftwareTask.install_progress)\n except Exception as err:\n\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_INSTALL_NOK.format(str(err)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED, task_state_message=str(err))\n return self.sample_device\n\n if result is True:\n\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_INSTALL_OK.format(self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_INSTALL_OK.format(self.sample_device.deviceIP))\n\n else:\n\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_INSTALL_NOK.format(str(result)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_INSTALL_NOK.format(str(result)))\n time.sleep(3)\n return self.sample_device\n\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_REBOOT.format(self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_REBOOT.format(self.sample_device.deviceIP))\n\n try:\n rsp = self.sample_device.deviceConnection.sw.reboot()\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_REBOOT_DEV_RESP.format(rsp.replace('\\n', \" \")))\n # self.sample_device.deviceConnection.close()\n\n except exception.ConnectClosedError:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_CONN_LOOSE_REBOOT)\n self.update_task_state(new_task_state=c.TASK_STATE_REBOOTING,\n task_state_message=logmsg.SW_CONN_LOOSE_REBOOT)\n finally:\n\n alive = self.probe_device_not_alive(self.sample_device,\n self.grp_cfg.TASKS.Provision.Software.RetryProbeCounter)\n\n if not alive:\n self.sample_device.deviceIsRebooted = True\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_PROBE_WAKEUP.format(self.sample_device.deviceIP))\n status, self.sample_device = Tools.create_dev_conn(self.sample_device, connect=False)\n\n if status:\n\n alive = self.probe_device_alive(self.sample_device,\n self.grp_cfg.TASKS.Provision.Software.RebootProbeTimeout)\n\n if alive:\n\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_PROBE_WAKUP_OK.format(self.sample_device.deviceIP))\n self.sample_device.deviceIsRebooted = False\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_PROBE_WAKUP_OK.format(\n self.sample_device.deviceIP))\n status, self.sample_device = Tools.create_dev_conn(self.sample_device)\n\n if status:\n\n self.sample_device.deviceConnection.bind(cu=Config, sw=SW)\n # Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n # 
message=logmsg.SW_CONN_OK.format(self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_CONN_OK.format(\n self.sample_device.deviceIP))\n\n return self.sample_device\n\n else:\n return self.sample_device\n\n else:\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=c.TASK_STATE_MSG_FAILED)\n self.sample_device.deviceConnection = None\n return self.sample_device\n\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_PROBE_DEV_NOK.format(self.sample_device.deviceIP,\n self.grp_cfg.TASKS.Provision.Software.RebootProbeCounter))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_PROBE_DEV_NOK.format(\n self.sample_device.deviceIP,\n self.grp_cfg.TASKS.Provision.Software.RebootProbeCounter))\n\n def probe_device_alive(self, device, timeout):\n \"\"\"\n\n :param device:\n :param timeout:\n :return:\n \"\"\"\n\n alive = device.deviceConnection.probe(timeout=5)\n probe_attemps = self.grp_cfg.TASKS.Provision.Software.RebootProbeCounter\n probe_cntr = 0\n\n while not alive:\n\n if probe_cntr <= probe_attemps:\n alive = device.deviceConnection.probe(timeout)\n probe_cntr += 1\n Tools.emit_log(task_name=self.task_name, sample_device=device,\n message=logmsg.SW_PROBE_DEV.format(timeout))\n self.update_task_state(new_task_state=c.TASK_STATE_REBOOTING,\n task_state_message=logmsg.SW_PROBE_WAIT_REBOOT.format(str(probe_cntr)))\n else:\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED, task_state_message=c.TASK_STATE_FAILED)\n break\n\n return alive\n\n def probe_device_not_alive(self, device, timeout):\n \"\"\"\n\n :param device:\n :param timeout:\n :return:\n \"\"\"\n\n alive = device.deviceConnection.probe(timeout=5)\n probe_attemps = self.grp_cfg.TASKS.Provision.Software.RebootProbeCounter\n probe_cntr = 0\n\n while alive:\n\n if probe_cntr <= probe_attemps:\n alive = device.deviceConnection.probe(1)\n probe_cntr += 1\n Tools.emit_log(task_name=self.task_name, sample_device=device,\n message=logmsg.SW_PROBE_DEV.format(timeout))\n self.update_task_state(new_task_state=c.TASK_STATE_REBOOTING,\n task_state_message=logmsg.SW_PROBE_WAIT_REBOOT.format(str(probe_cntr)))\n time.sleep(timeout)\n else:\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED, task_state_message=c.TASK_STATE_MSG_FAILED)\n break\n\n return alive\n\n @staticmethod\n def install_progress(dev, report):\n c.logger.info(\n '[{0:{1}}][{2:{3}}][{4}]'.format('SOFTWARE', c.FIRST_PAD, dev.facts[\"serialnumber\"], c.SECOND_PAD, report))\n\n with SoftwareTask.sample_devices_lock:\n SoftwareTask.sample_devices[dev.facts['serialnumber']].deviceTasks.taskState['Software'] = {\n 'taskState': c.TASK_STATE_PROGRESS, 'taskStateMsg': report}\n\n @staticmethod\n def copy_progress(filename, size, sent):\n # print filename + \" \" + str(int(size)) + \" \" + str(int(sent))\n # print (sent / (1024 * 1024)) * 100.0 / (size / (1024 * 1024))\n\n c.logger.info('PROVSW: Copy file <%s> progress <%s>', filename,\n (sent / (1024 * 1024)) * 100.0 / (size / (1024 * 1024)))\n #with SoftwareTask.sample_devices_lock:\n # SoftwareTask.sample_devices[dev.facts['serialnumber']].deviceTasks.taskState['Software'] = (sent / (1024 * 1024)) * 100.0 / (size / (1024 * 1024)))\n\n def post_run_task(self):\n with SoftwareTask.sample_devices_lock:\n if self.sample_device.deviceSerial in SoftwareTask.sample_devices:\n del 
SoftwareTask.sample_devices[self.sample_device.deviceSerial]\n",
"step-ids": [
9,
10,
11,
12,
13
]
}
|
[
9,
10,
11,
12,
13
] |
#!/usr/bin/env python
import webapp2 # web application framework
import jinja2 # template engine
import os # access file system
import csv
from google.appengine.api import users # Google account authentication
from google.appengine.ext import db # datastore
# initialise template
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
class Contact(db.Expando): # allows for different number of fields
    ''' User data model '''
    pid = db.StringProperty(required=True) # string = 500 char, allow field to be indexed, perform faster
    name = db.StringProperty(required=True)
    class12 = db.StringProperty(required=True)
    email = db.EmailProperty(required=True)
    handphone = db.StringProperty(required=False)
    tickets_csjh = db.StringProperty(required=False)
    tickets_edssh = db.StringProperty(required=False)
    remark = db.TextProperty()

class MainHandler(webapp2.RequestHandler):
    ''' Home page handler '''
    def get(self):
        ''' Show home page '''
        # import data
        # check if valid Google account
        # school_register = csv.reader(open('data.csv'),delimiter=',')
        # found = False
        user = users.get_current_user()
        # for student in school_register:  # if valid logged in user
        #     if student[0] == self.request.get('pid'):
        #         contact = student
        #         found = True
        #         break
        if user:
            # logout link
            url = users.create_logout_url(self.request.uri)
            # logout text
            url_linktext = 'Logout'
            # retrieve user record from datastore
            # may get multiple records, so in order to get one record:
            query = Contact.gql('WHERE pid = :1', user.nickname())
            result = query.fetch(1)
            if result: # if user record found
                contact = result[0]
greeting = ("Welcome %s!" % (contact.name,)) #1 item in couple = put comma
            else: # not found
                contact = "Invalid dhs.sg user"
                greeting = ""
        else: # not logged in
            # login link
            url = users.create_login_url(self.request.uri)
            # login text
            url_linktext = 'Login'
            contact = "Not authorised"
            greeting = "You need to"
        template_values = {
            'contact': contact,
            'greeting': greeting,
            'url': url,
            'url_linktext': url_linktext,
        }
        # create index.html template
        template = jinja_environment.get_template('index.html')
        # associate template values with template
        self.response.out.write(template.render(template_values))

class Submit(webapp2.RequestHandler):
    ''' Submit form '''
    def post(self):
        if self.request.get('submit'):
            updated_handphone = self.request.get('handphone')
            updated_tickets_csjh = self.request.get('tickets_csjh')
            updated_tickets_edssh = self.request.get('tickets_edssh')
            updated_remark = self.request.get('remark')
            url = users.create_logout_url(self.request.uri)
            url_linktext = 'Logout'
            user = users.get_current_user()
            query = Contact.gql('WHERE pid = :1', user.nickname())
            result = query.fetch(1)
            if result:
                contact = result[0]
                greeting = ("User: %s" % (contact.name,))
                contact.handphone = updated_handphone
                contact.tickets_csjh = updated_tickets_csjh
                contact.tickets_edssh = updated_tickets_edssh
                contact.remark = db.Text(updated_remark)
                contact.put()
            else:
                self.response.out.write('Reservation failed!')
        template_values = {
            'contact': contact,
            'greeting': greeting,
            'url': url,
            'url_linktext': url_linktext,
            'contact.handphone': updated_handphone,
            'contact.tickets_csjh': updated_tickets_csjh,
            'contact.tickets_edssh': updated_tickets_edssh,
            'contact.remark': updated_remark,
        }
        template = jinja_environment.get_template('submit.html')
        self.response.out.write(template.render(template_values))
# main
contact2 = Contact(pid = 'lim.ahseng', name = 'Lim Ah Seng', class12 = '5C99', email = '[email protected]', handphone = '', tickets_csjh = '', tickets_edssh = '', remark = '')
contact2.put()
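# Note: the two lines above run at module import time, so this sample Contact record
# is (re)written to the datastore every time a new application instance starts up.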
app = webapp2.WSGIApplication([('/', MainHandler), ('/submit', Submit)],
                              debug=True)
|
normal
|
{
"blob_id": "aeef27d667f95e3818f73533439385ea949b96a4",
"index": 2445,
"step-1": "<mask token>\n\n\nclass Submit(webapp2.RequestHandler):\n <mask token>\n\n def post(self):\n if self.request.get('submit'):\n updated_handphone = self.request.get('handphone')\n updated_tickets_csjh = self.request.get('tickets_csjh')\n updated_tickets_edssh = self.request.get('tickets_edssh')\n updated_remark = self.request.get('remark')\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n user = users.get_current_user()\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'User: %s' % (contact.name,)\n contact.handphone = updated_handphone\n contact.tickets_csjh = updated_tickets_csjh\n contact.tickets_edssh = updated_tickets_edssh\n contact.remark = db.Text(updated_remark)\n contact.put()\n else:\n self.response.out.write('Reservation failed!')\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext, 'contact.handphone':\n updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,\n 'contact.tickets_edssh': updated_tickets_edssh,\n 'contact.remark': updated_remark}\n template = jinja_environment.get_template('submit.html')\n self.response.out.write(template.render(template_values))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Contact(db.Expando):\n <mask token>\n pid = db.StringProperty(required=True)\n name = db.StringProperty(required=True)\n class12 = db.StringProperty(required=True)\n email = db.EmailProperty(required=True)\n handphone = db.StringProperty(required=False)\n tickets_csjh = db.StringProperty(required=False)\n tickets_edssh = db.StringProperty(required=False)\n remark = db.TextProperty()\n\n\nclass MainHandler(webapp2.RequestHandler):\n \"\"\" Home page handler \"\"\"\n\n def get(self):\n \"\"\" Show home page \"\"\"\n user = users.get_current_user()\n if user:\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'Welcome %s!' % (contact.name,)\n else:\n contact = 'Invalid dhs.sg user'\n greeting = ''\n else:\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login'\n contact = 'Not authorised'\n greeting = 'You need to'\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext}\n template = jinja_environment.get_template('index.html')\n self.response.out.write(template.render(template_values))\n\n\nclass Submit(webapp2.RequestHandler):\n \"\"\" Submit form \"\"\"\n\n def post(self):\n if self.request.get('submit'):\n updated_handphone = self.request.get('handphone')\n updated_tickets_csjh = self.request.get('tickets_csjh')\n updated_tickets_edssh = self.request.get('tickets_edssh')\n updated_remark = self.request.get('remark')\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n user = users.get_current_user()\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'User: %s' % (contact.name,)\n contact.handphone = updated_handphone\n contact.tickets_csjh = updated_tickets_csjh\n contact.tickets_edssh = updated_tickets_edssh\n contact.remark = db.Text(updated_remark)\n contact.put()\n else:\n self.response.out.write('Reservation failed!')\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext, 'contact.handphone':\n updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,\n 'contact.tickets_edssh': updated_tickets_edssh,\n 'contact.remark': updated_remark}\n template = jinja_environment.get_template('submit.html')\n self.response.out.write(template.render(template_values))\n\n\n<mask token>\n",
"step-3": "<mask token>\njinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.\n path.dirname(__file__)))\n\n\nclass Contact(db.Expando):\n \"\"\" User data model \"\"\"\n pid = db.StringProperty(required=True)\n name = db.StringProperty(required=True)\n class12 = db.StringProperty(required=True)\n email = db.EmailProperty(required=True)\n handphone = db.StringProperty(required=False)\n tickets_csjh = db.StringProperty(required=False)\n tickets_edssh = db.StringProperty(required=False)\n remark = db.TextProperty()\n\n\nclass MainHandler(webapp2.RequestHandler):\n \"\"\" Home page handler \"\"\"\n\n def get(self):\n \"\"\" Show home page \"\"\"\n user = users.get_current_user()\n if user:\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'Welcome %s!' % (contact.name,)\n else:\n contact = 'Invalid dhs.sg user'\n greeting = ''\n else:\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login'\n contact = 'Not authorised'\n greeting = 'You need to'\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext}\n template = jinja_environment.get_template('index.html')\n self.response.out.write(template.render(template_values))\n\n\nclass Submit(webapp2.RequestHandler):\n \"\"\" Submit form \"\"\"\n\n def post(self):\n if self.request.get('submit'):\n updated_handphone = self.request.get('handphone')\n updated_tickets_csjh = self.request.get('tickets_csjh')\n updated_tickets_edssh = self.request.get('tickets_edssh')\n updated_remark = self.request.get('remark')\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n user = users.get_current_user()\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'User: %s' % (contact.name,)\n contact.handphone = updated_handphone\n contact.tickets_csjh = updated_tickets_csjh\n contact.tickets_edssh = updated_tickets_edssh\n contact.remark = db.Text(updated_remark)\n contact.put()\n else:\n self.response.out.write('Reservation failed!')\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext, 'contact.handphone':\n updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,\n 'contact.tickets_edssh': updated_tickets_edssh,\n 'contact.remark': updated_remark}\n template = jinja_environment.get_template('submit.html')\n self.response.out.write(template.render(template_values))\n\n\ncontact2 = Contact(pid='lim.ahseng', name='Lim Ah Seng', class12='5C99',\n email='[email protected]', handphone='', tickets_csjh='', tickets_edssh\n ='', remark='')\ncontact2.put()\napp = webapp2.WSGIApplication([('/', MainHandler), ('/submit', Submit)],\n debug=True)\n",
"step-4": "import webapp2\nimport jinja2\nimport os\nimport csv\nfrom google.appengine.api import users\nfrom google.appengine.ext import db\njinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.\n path.dirname(__file__)))\n\n\nclass Contact(db.Expando):\n \"\"\" User data model \"\"\"\n pid = db.StringProperty(required=True)\n name = db.StringProperty(required=True)\n class12 = db.StringProperty(required=True)\n email = db.EmailProperty(required=True)\n handphone = db.StringProperty(required=False)\n tickets_csjh = db.StringProperty(required=False)\n tickets_edssh = db.StringProperty(required=False)\n remark = db.TextProperty()\n\n\nclass MainHandler(webapp2.RequestHandler):\n \"\"\" Home page handler \"\"\"\n\n def get(self):\n \"\"\" Show home page \"\"\"\n user = users.get_current_user()\n if user:\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'Welcome %s!' % (contact.name,)\n else:\n contact = 'Invalid dhs.sg user'\n greeting = ''\n else:\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login'\n contact = 'Not authorised'\n greeting = 'You need to'\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext}\n template = jinja_environment.get_template('index.html')\n self.response.out.write(template.render(template_values))\n\n\nclass Submit(webapp2.RequestHandler):\n \"\"\" Submit form \"\"\"\n\n def post(self):\n if self.request.get('submit'):\n updated_handphone = self.request.get('handphone')\n updated_tickets_csjh = self.request.get('tickets_csjh')\n updated_tickets_edssh = self.request.get('tickets_edssh')\n updated_remark = self.request.get('remark')\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n user = users.get_current_user()\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'User: %s' % (contact.name,)\n contact.handphone = updated_handphone\n contact.tickets_csjh = updated_tickets_csjh\n contact.tickets_edssh = updated_tickets_edssh\n contact.remark = db.Text(updated_remark)\n contact.put()\n else:\n self.response.out.write('Reservation failed!')\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext, 'contact.handphone':\n updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,\n 'contact.tickets_edssh': updated_tickets_edssh,\n 'contact.remark': updated_remark}\n template = jinja_environment.get_template('submit.html')\n self.response.out.write(template.render(template_values))\n\n\ncontact2 = Contact(pid='lim.ahseng', name='Lim Ah Seng', class12='5C99',\n email='[email protected]', handphone='', tickets_csjh='', tickets_edssh\n ='', remark='')\ncontact2.put()\napp = webapp2.WSGIApplication([('/', MainHandler), ('/submit', Submit)],\n debug=True)\n",
"step-5": "#!/usr/bin/env python\n\nimport webapp2 # web application framework\nimport jinja2 # template engine\nimport os \t # access file system\nimport csv\nfrom google.appengine.api import users\t# Google account authentication\nfrom google.appengine.ext import db\t\t# datastore\n\n# initialise template\njinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))\n\nclass Contact(db.Expando): # allows for different number of fields\n\t''' User data model '''\n\tpid = db.StringProperty(required=True) # string = 500 char, allow field to be indexed, perform faster\n\tname = db.StringProperty(required=True)\n\tclass12 = db.StringProperty(required=True)\n\temail = db.EmailProperty(required=True)\n\thandphone = db.StringProperty(required=False)\n\ttickets_csjh = db.StringProperty(required=False)\n\ttickets_edssh = db.StringProperty(required=False)\n\tremark = db.TextProperty()\n\n\t\nclass MainHandler(webapp2.RequestHandler):\n\t''' Home page handler '''\n\tdef get(self):\n\t\t''' Show home page '''\n\t\t# import data\n\t\t# check if valid Google account\n#\t\tschool_register = csv.reader(open('data.csv'),delimiter=',')\n#\t\tfound = False\n\t\tuser = users.get_current_user()\n\t\n#\t\tfor student in school_register:\t# if valid logged in user\n#\t\t\tif student[0] == self.request.get('pid'):\n#\t\t\t\tcontact = student\n#\t\t\t\tfound = True\n#\t\t\t\tbreak\n\n\t\tif user: \n\t\t\t# logout link\n\t\t\turl = users.create_logout_url(self.request.uri)\n\t\t\t# logout text\n\t\t\turl_linktext = 'Logout'\n\t\t\t# retrieve user record from datastore\n\t\t\t# may get multiple records, so in order to get one record:\n\t\t\tquery = Contact.gql('WHERE pid = :1', user.nickname())\n\t\t\tresult = query.fetch(1)\n\t\t\tif result: #if user record found\n\t\t\t\tcontact = result[0]\n\t\t\t\tgreeting = (\"Welcome %s!\" % (contact.name,)) #1 item in couple = put comma\n\t\t\telse: #not found\n\t\t\t\tcontact = \"Invalid dhs.sg user\"\n\t\t\t\tgreeting = \"\"\n\t\t\t\n\t\telse: # not logged in \n\t\t\t\t# login link\n\t\t\turl = users.create_login_url(self.request.uri)\n\t\t\t\t# login text\n\t\t\turl_linktext = 'Login'\n\t\t\tcontact = \"Not authorised\"\n\t\t\tgreeting = \"You need to\"\n\t\t\t\n\t\ttemplate_values = {\n\t\t\t'contact': contact,\n\t\t\t'greeting': greeting,\n\t\t\t'url': url,\n\t\t\t'url_linktext': url_linktext,\n\t\t}\n\t\t\n\t\t# create index.html template\n\t\ttemplate = jinja_environment.get_template('index.html')\n\t\t# associate template values with template\n\t\tself.response.out.write(template.render(template_values))\n\nclass Submit(webapp2.RequestHandler):\n\t''' Submit form '''\n\tdef post(self):\n\t\tif self.request.get('submit'):\n\t\t\tupdated_handphone = self.request.get('handphone')\n\t\t\tupdated_tickets_csjh = self.request.get('tickets_csjh')\n\t\t\tupdated_tickets_edssh = self.request.get('tickets_edssh')\n\t\t\tupdated_remark = self.request.get('remark')\n\t\t\turl = users.create_logout_url(self.request.uri)\n\t\t\turl_linktext = 'Logout'\n\t\t\tuser = users.get_current_user()\n\t\t\tquery = Contact.gql('WHERE pid = :1', user.nickname())\n\t\t\tresult = query.fetch(1)\n\t\t\t\n\t\t\tif result: \n\t\t\t\tcontact = result[0]\n\t\t\t\tgreeting = (\"User: %s\" % (contact.name,)) \n\t\t\t\tcontact.handphone = updated_handphone\n\t\t\t\tcontact.tickets_csjh = updated_tickets_csjh\n\t\t\t\tcontact.tickets_edssh = updated_tickets_edssh\n\t\t\t\tcontact.remark = db.Text(updated_remark)\n\t\t\t\tcontact.put()\n\t\t\telse: 
\t\n\t\t\t\tself.response.out.write('Reservation failed!')\n\t\n\t\t\n\t\ttemplate_values = {\n\t\t\t'contact': contact,\n\t\t\t'greeting': greeting,\n\t\t\t'url': url,\n\t\t\t'url_linktext': url_linktext,\n\t\t\t'contact.handphone': updated_handphone,\n\t\t\t'contact.tickets_csjh': updated_tickets_csjh,\n\t\t\t'contact.tickets_edssh': updated_tickets_edssh,\n\t\t\t'contact.remark': updated_remark,\n\t\t}\n\t\t\n\t\ttemplate = jinja_environment.get_template('submit.html') \n\t\tself.response.out.write(template.render(template_values))\n\n# main\n\ncontact2 = Contact(pid = 'lim.ahseng', name = 'Lim Ah Seng', class12 = '5C99', email = '[email protected]', handphone = '', tickets_csjh = '', tickets_edssh = '', remark = '')\ncontact2.put()\n\t\napp = webapp2.WSGIApplication([('/', MainHandler), ('/submit', Submit)], \n\t\t\t\t\t\t\t\tdebug=True)\n\n \n",
"step-ids": [
2,
8,
11,
12,
13
]
}
|
[
2,
8,
11,
12,
13
] |
import pandas as pd #@UnusedImport
import matplotlib.pyplot as plt
import matplotlib #@UnusedImport
import numpy as np #@UnusedImport
class Plotter():
    def __init__(self):
        self.red_hex_code = '#ff0000'

    def AlkDMIonStatsSplitPlot(self, df):
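        # Plots average area-per-ion ('ave_api') against cumulative injection count for
        # the PV1 and PV2 instruments on two side-by-side axes, drawing one curve per
        # detector-voltage-offset data set found in the DataFrame.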
        PV1_DataSets_lst = df[df['inst'] == 'PV1']['DataSet'].unique()
        PV2_DataSets_lst = df[df['inst'] == 'PV2']['DataSet'].unique()
        inst_sets = [PV1_DataSets_lst, PV2_DataSets_lst]
        ax_title = ['Peg-BT PV1', 'Peg-BT PV2']
        fig = plt.figure(figsize=(25,9))
        ax1 = fig.add_subplot(1,2,1)
        ax2 = fig.add_subplot(1,2,2)
        ax1.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.1,0.9,4))) #@UndefinedVariable
        ax2.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.1,0.9,4))) #@UndefinedVariable
        ax = [ax1,ax2]
        for a in range(2):
            ax[a].spines['right'].set_visible(False)
            ax[a].spines['top'].set_visible(False)
            ax[a].set_ylabel('Area Per Ion via Detector Measurement')
            ax[a].set_xlabel('Alkane Standard\nSample Injection Count')
            ax[a].set_title(ax_title[a])
            for dset in inst_sets[a]:
                df_sliced = df[df['DataSet'] == dset].copy()
                offset = df_sliced['offset_volts'].iloc[2]
                dv = df_sliced['Det_Volts'].iloc[2]
                curve_label = 'Offset: +{v} v = {d} v'.format(v=offset, d=dv)
                ax[a].plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'], label=curve_label)
            ax[a].legend(loc='center', bbox_to_anchor=(0.17,-0.1))
        # plt.suptitle('Tracking Area Per Ion via Detector Measurement\nOver ~48 Hours of Continuous Sample Acquisition', fontsize=14)
        plt.savefig('DM_API_Analysis', bbox_inches='tight')
        plt.show()

    def AlkDMIonStatsPlot(self, df):
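        # Same area-per-ion trend as above, but with every data set from both
        # instruments drawn on a single axes object.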
        DataSets_lst = df['DataSet'].unique()
        fig = plt.figure(figsize=(15.5,9))
        ax = fig.add_subplot(1,1,1)
        ax.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.1,1.00,8))) #@UndefinedVariable
        for dset in DataSets_lst:
            df_sliced = df[df['DataSet'] == dset].copy()
            instrument = df_sliced['inst'].iloc[2]
            offset = df_sliced['offset_volts'].iloc[2]
            dv = df_sliced['Det_Volts'].iloc[2]
            curve_label = 'Inst: {i} - Offset: +{v} v = {d} v'.format(i=instrument, v=offset, d=dv)
            ax.plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'], label=curve_label)
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        plt.ylabel('Ave. Area Per Ion')
        plt.xlabel('Sample Injections')
        plt.title('Tracking Area Per Ion via Detector Measurement\nOver ~48 Hours of Continuous Sample Acquisition')
        legend_h_offset, legend_v_offset = 1.25, 0.75
        plt.legend(loc='center right', bbox_to_anchor=(legend_h_offset, legend_v_offset))
        plt.savefig('DM_API_Analysis', bbox_inches='tight')
        plt.show()

    def GenericIndividualPlotMaker(self, xdata_lst, ydata_lst, legendlbl_lst, xlbl, ylbl, plot_title, png_filename, legend_h_offset=1.25, legend_v_offset=0.75, legend_location='center'):
        # xdata_lst & ydata_lst: each is a list of lists containing the corresponding axis data.
        # Requirements on these two arguments to avoid errors:
        #   - Sublists with the same index are a matching x vs y set that will be plotted; they MUST be the same length.
        #   - Both arguments must contain the same number of sublists.
        # legendlbl_lst: a list of legend labels, one per x vs y plot, so it must have the same number of items as there are x/y pairs.
        # The remaining parameters are self-explanatory.
        fig = plt.figure(figsize=(15.5,9))
        ax = fig.add_subplot(1,1,1)
        for i in range(len(xdata_lst)):
            ax.plot(xdata_lst[i], ydata_lst[i], color=self.color_codes[i], label=legendlbl_lst[i])
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        plt.ylabel(ylbl)
        plt.xlabel(xlbl)
        plt.title(plot_title)
        plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset, legend_v_offset))
        plt.savefig(png_filename, bbox_inches='tight')

    # (x_data, all_y_data, legendlbl_lst, xlbl, plot_titles, figure_title, all_png_filenames)
    def GenericCombinedPlotMaker(self, xdata_lst, ydata_lst, legendlbl_lst, xlbl, ylbl_lst, fig_title, png_filename, legend_h_offset=0.9, legend_v_offset=2.4, legend_location='center'):
        # xdata_lst: a list of lists, each containing the corresponding x-axis data; the x-axis data is shared by all ax_n objects.
        #   Generic example: [Series_1_x-axis_data_lst, Series_n_x-axis_data_lst, ...]
        # ydata_lst: a list of lists of lists containing all the y-axis data.
        #   Generic example: [ax_1[Series_1_y-axis_data_lst, Series_n_y-axis_data_lst, ...], ..., ax_n[Series_1_y-axis_data_lst, Series_n_y-axis_data_lst, ...]]
        # Requirements to avoid errors:
        #   - Sublists with the same index are a matching x vs y set that will be plotted; they MUST be the same length.
        #   - There must be the same number of sublists in each argument.
        # legendlbl_lst: a list of legend labels, one per x vs y plot, so it must have the same number of items as there are x/y pairs.
        # The remaining parameters are self-explanatory.
        fig = plt.figure(figsize=(25,9))
        ax = []
        for a in range(4):
            ax.append(fig.add_subplot(2,2,1+a))
            ax[a].set_prop_cycle('color',plt.cm.spectral(np.linspace(0.25,0.84,2))) #@UndefinedVariable
            for s in range(len(xdata_lst)):
                ax[a].plot(xdata_lst[s], ydata_lst[a][s], label=legendlbl_lst[s])
                ax[a].spines['right'].set_visible(False)
                ax[a].spines['top'].set_visible(False)
                ax[a].set_ylabel(ylbl_lst[a])
                if (a == 2 or a == 3) and s == 1:
                    plt.xlabel(xlbl)
                elif (a == 0 or a == 1) and s == 1:
                    ax[a].set_xticklabels([])
                    ax[a].spines['bottom'].set_visible(False)
                    ax[a].xaxis.set_ticks_position('none')
        plt.suptitle(fig_title, fontsize=20)
        plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset, legend_v_offset))
        plt.savefig(png_filename, bbox_inches='tight')

    def Manual_OFN20fg_IDL(self):
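        # Hard-coded IDL (pg) results for 20 fg OFN on column, plotted against the
        # optimized detector voltage offset for both instruments.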
        fig = plt.figure(figsize=(25,9))
        ax = fig.add_subplot(1,1,1)
        ax.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.25,0.84,2))) #@UndefinedVariable
        xdata = [0,150,250,350]
        ydata = [[0.036614, 0.009674, 0.0056418, 0.004696],[0.0083151, 0.0044855, 0.0046082, 0.0033099]]
        legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']
        for s in range(len(ydata)):
            ax.plot(xdata, ydata[s], label=legendlbl_lst[s])
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.set_ylabel('IDL pg')
        ax.set_xlabel('Optimized Detector Voltage Offset (volts)')
        plt.legend()
        plt.suptitle('IDL vs Detector Voltage Offset\nOFN 0.02 pg On Column\nQuant Mass = 271.99', fontsize=20)
        plt.savefig('OFN_20fg_IDL_Plot', bbox_inches='tight')

    def Manual_GO_Plot(self):
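        # Hard-coded plot of the change in optimized detector voltage (from the beginning
        # to the end of a data set, per the commented-out title below) against the
        # detector voltage offset for both instruments.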
        fig = plt.figure(figsize=(25,9))
        ax = fig.add_subplot(1,1,1)
        ax.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.25,0.84,2))) #@UndefinedVariable
        xdata = [0,150,250,350]
        ydata = [[-7.7, 26.5, 42.8, 66.1],[-8, 4.1, 13.5, 48.4]]
        legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']
        for s in range(len(ydata)):
            ax.plot(xdata, ydata[s], label=legendlbl_lst[s])
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.set_ylabel('Change in Optimized Detector Voltage')
        ax.set_xlabel('Optimized Detector Voltage Offset (volts)')
        plt.legend()
        # plt.suptitle('Change in Optimized Detector Voltage\nFrom the Beginning to the End of a Data Set', fontsize=20)
        plt.savefig('GO_Delta_Plot', bbox_inches='tight')
        plt.show()
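
# Usage sketch (assumes a DataFrame exposing the columns referenced above: 'DataSet',
# 'inst', 'offset_volts', 'Det_Volts', 'Cumulative_Inj', 'ave_api'; the CSV file name
# here is hypothetical):
# if __name__ == '__main__':
#     df = pd.read_csv('alkane_dm_ion_stats.csv')  # hypothetical input file
#     plotter = Plotter()
#     plotter.AlkDMIonStatsSplitPlot(df)   # side-by-side PV1/PV2 area-per-ion trends
#     plotter.Manual_OFN20fg_IDL()         # hard-coded IDL vs detector-offset plot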
|
normal
|
{
"blob_id": "81b920ab5417937dc0fc1c9675d393efc6a4d58d",
"index": 5453,
"step-1": "<mask token>\n\n\nclass Plotter:\n\n def __init__(self):\n self.red_hex_code = '#ff0000'\n\n def AlkDMIonStatsSplitPlot(self, df):\n PV1_DataSets_lst = df[df['inst'] == 'PV1']['DataSet'].unique()\n PV2_DataSets_lst = df[df['inst'] == 'PV2']['DataSet'].unique()\n inst_sets = [PV1_DataSets_lst, PV2_DataSets_lst]\n ax_title = ['Peg-BT PV1', 'Peg-BT PV2']\n fig = plt.figure(figsize=(25, 9))\n ax1 = fig.add_subplot(1, 2, 1)\n ax2 = fig.add_subplot(1, 2, 2)\n ax1.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))\n ax2.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))\n ax = [ax1, ax2]\n for a in range(2):\n ax[a].spines['right'].set_visible(False)\n ax[a].spines['top'].set_visible(False)\n ax[a].set_ylabel('Area Per Ion via Detector Measurement')\n ax[a].set_xlabel('Alkane Standard\\nSample Injection Count')\n ax[a].set_title(ax_title[a])\n for dset in inst_sets[a]:\n df_sliced = df[df['DataSet'] == dset].copy()\n offset = df_sliced['offset_volts'].iloc[2]\n dv = df_sliced['Det_Volts'].iloc[2]\n curve_label = 'Offset: +{v} v = {d} v'.format(v=offset, d=dv)\n ax[a].plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'\n ], label=curve_label)\n ax[a].legend(loc='center', bbox_to_anchor=(0.17, -0.1))\n plt.savefig('DM_API_Analysis', bbox_inches='tight')\n plt.show()\n <mask token>\n <mask token>\n <mask token>\n\n def Manual_OFN20fg_IDL(self):\n fig = plt.figure(figsize=(25, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))\n xdata = [0, 150, 250, 350]\n ydata = [[0.036614, 0.009674, 0.0056418, 0.004696], [0.0083151, \n 0.0044855, 0.0046082, 0.0033099]]\n legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\n for s in range(len(ydata)):\n ax.plot(xdata, ydata[s], label=legendlbl_lst[s])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylabel('IDL pg')\n ax.set_xlabel('Optimized Detector Voltage Offset (volts)')\n plt.legend()\n plt.suptitle(\n 'IDL vs Detector Voltage Offset\\nOFN 0.02 pg On Column\\nQuant Mass = 271.99'\n , fontsize=20)\n plt.savefig('OFN_20fg_IDL_Plot', bbox_inches='tight')\n\n def Manual_GO_Plot(self):\n fig = plt.figure(figsize=(25, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))\n xdata = [0, 150, 250, 350]\n ydata = [[-7.7, 26.5, 42.8, 66.1], [-8, 4.1, 13.5, 48.4]]\n legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\n for s in range(len(ydata)):\n ax.plot(xdata, ydata[s], label=legendlbl_lst[s])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylabel('Change in Optimized Detector Voltage')\n ax.set_xlabel('Optimized Detector Voltage Offset (volts)')\n plt.legend()\n plt.savefig('GO_Delta_Plot', bbox_inches='tight')\n plt.show()\n",
"step-2": "<mask token>\n\n\nclass Plotter:\n\n def __init__(self):\n self.red_hex_code = '#ff0000'\n\n def AlkDMIonStatsSplitPlot(self, df):\n PV1_DataSets_lst = df[df['inst'] == 'PV1']['DataSet'].unique()\n PV2_DataSets_lst = df[df['inst'] == 'PV2']['DataSet'].unique()\n inst_sets = [PV1_DataSets_lst, PV2_DataSets_lst]\n ax_title = ['Peg-BT PV1', 'Peg-BT PV2']\n fig = plt.figure(figsize=(25, 9))\n ax1 = fig.add_subplot(1, 2, 1)\n ax2 = fig.add_subplot(1, 2, 2)\n ax1.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))\n ax2.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))\n ax = [ax1, ax2]\n for a in range(2):\n ax[a].spines['right'].set_visible(False)\n ax[a].spines['top'].set_visible(False)\n ax[a].set_ylabel('Area Per Ion via Detector Measurement')\n ax[a].set_xlabel('Alkane Standard\\nSample Injection Count')\n ax[a].set_title(ax_title[a])\n for dset in inst_sets[a]:\n df_sliced = df[df['DataSet'] == dset].copy()\n offset = df_sliced['offset_volts'].iloc[2]\n dv = df_sliced['Det_Volts'].iloc[2]\n curve_label = 'Offset: +{v} v = {d} v'.format(v=offset, d=dv)\n ax[a].plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'\n ], label=curve_label)\n ax[a].legend(loc='center', bbox_to_anchor=(0.17, -0.1))\n plt.savefig('DM_API_Analysis', bbox_inches='tight')\n plt.show()\n <mask token>\n\n def GenericIndividualPlotMaker(self, xdata_lst, ydata_lst,\n legendlbl_lst, xlbl, ylbl, plot_title, png_filename,\n legend_h_offset=1.25, legend_v_offset=0.75, legend_location='center'):\n fig = plt.figure(figsize=(15.5, 9))\n ax = fig.add_subplot(1, 1, 1)\n for i in range(len(xdata_lst)):\n ax.plot(xdata_lst[i], ydata_lst[i], color=self.color_codes[i],\n label=legendlbl_lst[i])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n plt.ylabel(ylbl)\n plt.xlabel(xlbl)\n plt.title(plot_title)\n plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset,\n legend_v_offset))\n plt.savefig(png_filename, bbox_inches='tight')\n <mask token>\n\n def Manual_OFN20fg_IDL(self):\n fig = plt.figure(figsize=(25, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))\n xdata = [0, 150, 250, 350]\n ydata = [[0.036614, 0.009674, 0.0056418, 0.004696], [0.0083151, \n 0.0044855, 0.0046082, 0.0033099]]\n legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\n for s in range(len(ydata)):\n ax.plot(xdata, ydata[s], label=legendlbl_lst[s])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylabel('IDL pg')\n ax.set_xlabel('Optimized Detector Voltage Offset (volts)')\n plt.legend()\n plt.suptitle(\n 'IDL vs Detector Voltage Offset\\nOFN 0.02 pg On Column\\nQuant Mass = 271.99'\n , fontsize=20)\n plt.savefig('OFN_20fg_IDL_Plot', bbox_inches='tight')\n\n def Manual_GO_Plot(self):\n fig = plt.figure(figsize=(25, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))\n xdata = [0, 150, 250, 350]\n ydata = [[-7.7, 26.5, 42.8, 66.1], [-8, 4.1, 13.5, 48.4]]\n legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\n for s in range(len(ydata)):\n ax.plot(xdata, ydata[s], label=legendlbl_lst[s])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylabel('Change in Optimized Detector Voltage')\n ax.set_xlabel('Optimized Detector Voltage Offset (volts)')\n plt.legend()\n plt.savefig('GO_Delta_Plot', bbox_inches='tight')\n plt.show()\n",
"step-3": "<mask token>\n\n\nclass Plotter:\n\n def __init__(self):\n self.red_hex_code = '#ff0000'\n\n def AlkDMIonStatsSplitPlot(self, df):\n PV1_DataSets_lst = df[df['inst'] == 'PV1']['DataSet'].unique()\n PV2_DataSets_lst = df[df['inst'] == 'PV2']['DataSet'].unique()\n inst_sets = [PV1_DataSets_lst, PV2_DataSets_lst]\n ax_title = ['Peg-BT PV1', 'Peg-BT PV2']\n fig = plt.figure(figsize=(25, 9))\n ax1 = fig.add_subplot(1, 2, 1)\n ax2 = fig.add_subplot(1, 2, 2)\n ax1.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))\n ax2.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))\n ax = [ax1, ax2]\n for a in range(2):\n ax[a].spines['right'].set_visible(False)\n ax[a].spines['top'].set_visible(False)\n ax[a].set_ylabel('Area Per Ion via Detector Measurement')\n ax[a].set_xlabel('Alkane Standard\\nSample Injection Count')\n ax[a].set_title(ax_title[a])\n for dset in inst_sets[a]:\n df_sliced = df[df['DataSet'] == dset].copy()\n offset = df_sliced['offset_volts'].iloc[2]\n dv = df_sliced['Det_Volts'].iloc[2]\n curve_label = 'Offset: +{v} v = {d} v'.format(v=offset, d=dv)\n ax[a].plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'\n ], label=curve_label)\n ax[a].legend(loc='center', bbox_to_anchor=(0.17, -0.1))\n plt.savefig('DM_API_Analysis', bbox_inches='tight')\n plt.show()\n\n def AlkDMIonStatsPlot(self, df):\n DataSets_lst = df['DataSet'].unique()\n fig = plt.figure(figsize=(15.5, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 1.0, 8)))\n for dset in DataSets_lst:\n df_sliced = df[df['DataSet'] == dset].copy()\n instrument = df_sliced['inst'].iloc[2]\n offset = df_sliced['offset_volts'].iloc[2]\n dv = df_sliced['Det_Volts'].iloc[2]\n curve_label = 'Inst: {i} - Offset: +{v} v = {d} v'.format(i=\n instrument, v=offset, d=dv)\n ax.plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'],\n label=curve_label)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n plt.ylabel('Ave. 
Aera Per Ion')\n plt.xlabel('Sample Injections')\n plt.title(\n \"\"\"Tracking Area Per Ion via Detector Measurement\nOver ~48 Hours of Continuous Sample Acquisition\"\"\"\n )\n legend_h_offset, legend_v_offset = 1.25, 0.75\n plt.legend(loc='center right', bbox_to_anchor=(legend_h_offset,\n legend_v_offset))\n plt.savefig('DM_API_Analysis', bbox_inches='tight')\n plt.show()\n\n def GenericIndividualPlotMaker(self, xdata_lst, ydata_lst,\n legendlbl_lst, xlbl, ylbl, plot_title, png_filename,\n legend_h_offset=1.25, legend_v_offset=0.75, legend_location='center'):\n fig = plt.figure(figsize=(15.5, 9))\n ax = fig.add_subplot(1, 1, 1)\n for i in range(len(xdata_lst)):\n ax.plot(xdata_lst[i], ydata_lst[i], color=self.color_codes[i],\n label=legendlbl_lst[i])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n plt.ylabel(ylbl)\n plt.xlabel(xlbl)\n plt.title(plot_title)\n plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset,\n legend_v_offset))\n plt.savefig(png_filename, bbox_inches='tight')\n\n def GenericCombinedPlotMaker(self, xdata_lst, ydata_lst, legendlbl_lst,\n xlbl, ylbl_lst, fig_title, png_filename, legend_h_offset=0.9,\n legend_v_offset=2.4, legend_location='center'):\n fig = plt.figure(figsize=(25, 9))\n ax = []\n for a in range(4):\n ax.append(fig.add_subplot(2, 2, 1 + a))\n ax[a].set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25,\n 0.84, 2)))\n for s in range(len(xdata_lst)):\n ax[a].plot(xdata_lst[s], ydata_lst[a][s], label=\n legendlbl_lst[s])\n ax[a].spines['right'].set_visible(False)\n ax[a].spines['top'].set_visible(False)\n ax[a].set_ylabel(ylbl_lst[a])\n if (a == 2 or a == 3) and s == 1:\n plt.xlabel(xlbl)\n elif (a == 0 or a == 1) and s == 1:\n ax[a].set_xticklabels([])\n ax[a].spines['bottom'].set_visible(False)\n ax[a].xaxis.set_ticks_position('none')\n plt.suptitle(fig_title, fontsize=20)\n plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset,\n legend_v_offset))\n plt.savefig(png_filename, bbox_inches='tight')\n\n def Manual_OFN20fg_IDL(self):\n fig = plt.figure(figsize=(25, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))\n xdata = [0, 150, 250, 350]\n ydata = [[0.036614, 0.009674, 0.0056418, 0.004696], [0.0083151, \n 0.0044855, 0.0046082, 0.0033099]]\n legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\n for s in range(len(ydata)):\n ax.plot(xdata, ydata[s], label=legendlbl_lst[s])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylabel('IDL pg')\n ax.set_xlabel('Optimized Detector Voltage Offset (volts)')\n plt.legend()\n plt.suptitle(\n 'IDL vs Detector Voltage Offset\\nOFN 0.02 pg On Column\\nQuant Mass = 271.99'\n , fontsize=20)\n plt.savefig('OFN_20fg_IDL_Plot', bbox_inches='tight')\n\n def Manual_GO_Plot(self):\n fig = plt.figure(figsize=(25, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))\n xdata = [0, 150, 250, 350]\n ydata = [[-7.7, 26.5, 42.8, 66.1], [-8, 4.1, 13.5, 48.4]]\n legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\n for s in range(len(ydata)):\n ax.plot(xdata, ydata[s], label=legendlbl_lst[s])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylabel('Change in Optimized Detector Voltage')\n ax.set_xlabel('Optimized Detector Voltage Offset (volts)')\n plt.legend()\n plt.savefig('GO_Delta_Plot', bbox_inches='tight')\n plt.show()\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\n\n\nclass Plotter:\n\n def __init__(self):\n self.red_hex_code = '#ff0000'\n\n def AlkDMIonStatsSplitPlot(self, df):\n PV1_DataSets_lst = df[df['inst'] == 'PV1']['DataSet'].unique()\n PV2_DataSets_lst = df[df['inst'] == 'PV2']['DataSet'].unique()\n inst_sets = [PV1_DataSets_lst, PV2_DataSets_lst]\n ax_title = ['Peg-BT PV1', 'Peg-BT PV2']\n fig = plt.figure(figsize=(25, 9))\n ax1 = fig.add_subplot(1, 2, 1)\n ax2 = fig.add_subplot(1, 2, 2)\n ax1.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))\n ax2.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))\n ax = [ax1, ax2]\n for a in range(2):\n ax[a].spines['right'].set_visible(False)\n ax[a].spines['top'].set_visible(False)\n ax[a].set_ylabel('Area Per Ion via Detector Measurement')\n ax[a].set_xlabel('Alkane Standard\\nSample Injection Count')\n ax[a].set_title(ax_title[a])\n for dset in inst_sets[a]:\n df_sliced = df[df['DataSet'] == dset].copy()\n offset = df_sliced['offset_volts'].iloc[2]\n dv = df_sliced['Det_Volts'].iloc[2]\n curve_label = 'Offset: +{v} v = {d} v'.format(v=offset, d=dv)\n ax[a].plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'\n ], label=curve_label)\n ax[a].legend(loc='center', bbox_to_anchor=(0.17, -0.1))\n plt.savefig('DM_API_Analysis', bbox_inches='tight')\n plt.show()\n\n def AlkDMIonStatsPlot(self, df):\n DataSets_lst = df['DataSet'].unique()\n fig = plt.figure(figsize=(15.5, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 1.0, 8)))\n for dset in DataSets_lst:\n df_sliced = df[df['DataSet'] == dset].copy()\n instrument = df_sliced['inst'].iloc[2]\n offset = df_sliced['offset_volts'].iloc[2]\n dv = df_sliced['Det_Volts'].iloc[2]\n curve_label = 'Inst: {i} - Offset: +{v} v = {d} v'.format(i=\n instrument, v=offset, d=dv)\n ax.plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'],\n label=curve_label)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n plt.ylabel('Ave. 
Aera Per Ion')\n plt.xlabel('Sample Injections')\n plt.title(\n \"\"\"Tracking Area Per Ion via Detector Measurement\nOver ~48 Hours of Continuous Sample Acquisition\"\"\"\n )\n legend_h_offset, legend_v_offset = 1.25, 0.75\n plt.legend(loc='center right', bbox_to_anchor=(legend_h_offset,\n legend_v_offset))\n plt.savefig('DM_API_Analysis', bbox_inches='tight')\n plt.show()\n\n def GenericIndividualPlotMaker(self, xdata_lst, ydata_lst,\n legendlbl_lst, xlbl, ylbl, plot_title, png_filename,\n legend_h_offset=1.25, legend_v_offset=0.75, legend_location='center'):\n fig = plt.figure(figsize=(15.5, 9))\n ax = fig.add_subplot(1, 1, 1)\n for i in range(len(xdata_lst)):\n ax.plot(xdata_lst[i], ydata_lst[i], color=self.color_codes[i],\n label=legendlbl_lst[i])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n plt.ylabel(ylbl)\n plt.xlabel(xlbl)\n plt.title(plot_title)\n plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset,\n legend_v_offset))\n plt.savefig(png_filename, bbox_inches='tight')\n\n def GenericCombinedPlotMaker(self, xdata_lst, ydata_lst, legendlbl_lst,\n xlbl, ylbl_lst, fig_title, png_filename, legend_h_offset=0.9,\n legend_v_offset=2.4, legend_location='center'):\n fig = plt.figure(figsize=(25, 9))\n ax = []\n for a in range(4):\n ax.append(fig.add_subplot(2, 2, 1 + a))\n ax[a].set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25,\n 0.84, 2)))\n for s in range(len(xdata_lst)):\n ax[a].plot(xdata_lst[s], ydata_lst[a][s], label=\n legendlbl_lst[s])\n ax[a].spines['right'].set_visible(False)\n ax[a].spines['top'].set_visible(False)\n ax[a].set_ylabel(ylbl_lst[a])\n if (a == 2 or a == 3) and s == 1:\n plt.xlabel(xlbl)\n elif (a == 0 or a == 1) and s == 1:\n ax[a].set_xticklabels([])\n ax[a].spines['bottom'].set_visible(False)\n ax[a].xaxis.set_ticks_position('none')\n plt.suptitle(fig_title, fontsize=20)\n plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset,\n legend_v_offset))\n plt.savefig(png_filename, bbox_inches='tight')\n\n def Manual_OFN20fg_IDL(self):\n fig = plt.figure(figsize=(25, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))\n xdata = [0, 150, 250, 350]\n ydata = [[0.036614, 0.009674, 0.0056418, 0.004696], [0.0083151, \n 0.0044855, 0.0046082, 0.0033099]]\n legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\n for s in range(len(ydata)):\n ax.plot(xdata, ydata[s], label=legendlbl_lst[s])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylabel('IDL pg')\n ax.set_xlabel('Optimized Detector Voltage Offset (volts)')\n plt.legend()\n plt.suptitle(\n 'IDL vs Detector Voltage Offset\\nOFN 0.02 pg On Column\\nQuant Mass = 271.99'\n , fontsize=20)\n plt.savefig('OFN_20fg_IDL_Plot', bbox_inches='tight')\n\n def Manual_GO_Plot(self):\n fig = plt.figure(figsize=(25, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))\n xdata = [0, 150, 250, 350]\n ydata = [[-7.7, 26.5, 42.8, 66.1], [-8, 4.1, 13.5, 48.4]]\n legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\n for s in range(len(ydata)):\n ax.plot(xdata, ydata[s], label=legendlbl_lst[s])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylabel('Change in Optimized Detector Voltage')\n ax.set_xlabel('Optimized Detector Voltage Offset (volts)')\n plt.legend()\n plt.savefig('GO_Delta_Plot', bbox_inches='tight')\n plt.show()\n",
"step-5": "import pandas as pd #@UnusedImport\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib #@UnusedImport\r\nimport numpy as np #@UnusedImport\r\n\r\nclass Plotter():\r\n\tdef __init__(self):\r\n\t\tself.red_hex_code = '#ff0000'\r\n\r\n\tdef AlkDMIonStatsSplitPlot(self, df):\r\n\t\tPV1_DataSets_lst = df[df['inst'] == 'PV1']['DataSet'].unique()\r\n\t\tPV2_DataSets_lst = df[df['inst'] == 'PV2']['DataSet'].unique()\r\n\t\tinst_sets = [PV1_DataSets_lst,PV2_DataSets_lst]\r\n\t\tax_title = ['Peg-BT PV1', 'Peg-BT PV2']\r\n\t\t\r\n\t\t\r\n\t\tfig = plt.figure(figsize=(25,9))\r\n\t\tax1 = fig.add_subplot(1,2,1)\r\n\t\tax2 = fig.add_subplot(1,2,2)\t\t\r\n\t\tax1.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.1,0.9,4))) #@UndefinedVariable\r\n\t\tax2.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.1,0.9,4))) #@UndefinedVariable\r\n\t\tax = [ax1,ax2]\r\n\t\t\r\n\t\tfor a in range(2):\r\n\t\t\t\r\n\t\t\tax[a].spines['right'].set_visible(False)\r\n\t\t\tax[a].spines['top'].set_visible(False)\r\n\t\t\tax[a].set_ylabel('Area Per Ion via Detector Measurement')\r\n\t\t\tax[a].set_xlabel('Alkane Standard\\nSample Injection Count')\r\n\t\t\tax[a].set_title(ax_title[a])\r\n\t\t\t\r\n\t\t\tfor dset in inst_sets[a]:\r\n\t\t\t\tdf_sliced = df[df['DataSet'] == dset].copy()\r\n\t\t\t\toffset = df_sliced['offset_volts'].iloc[2]\r\n\t\t\t\tdv = df_sliced['Det_Volts'].iloc[2]\r\n\t\t\t\tcurve_label = 'Offset: +{v} v = {d} v'.format(v=offset, d=dv)\r\n\t\t\t\tax[a].plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'], label=curve_label)\r\n\t\t\t\t\r\n\t\t\tax[a].legend(loc='center', bbox_to_anchor=(0.17,-0.1))\r\n\t\t\r\n# \t\tplt.suptitle('Tracking Area Per Ion via Detector Measurement\\nOver ~48 Hours of Continuous Sample Acquisition', fontsize=14)\r\n\t\tplt.savefig('DM_API_Analysis', bbox_inches='tight')\r\n\t\tplt.show()\r\n\r\n\r\n\t\r\n\tdef AlkDMIonStatsPlot(self, df):\r\n\t\tDataSets_lst = df['DataSet'].unique()\r\n\t\tfig = plt.figure(figsize=(15.5,9))\r\n\t\tax = fig.add_subplot(1,1,1)\r\n\t\tax.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.1,1.00,8))) #@UndefinedVariable\r\n\t\t\r\n\t\tfor dset in DataSets_lst:\r\n\t\t\tdf_sliced = df[df['DataSet'] == dset].copy()\r\n\t\t\tinstrument = df_sliced['inst'].iloc[2]\r\n\t\t\toffset = df_sliced['offset_volts'].iloc[2]\r\n\t\t\tdv = df_sliced['Det_Volts'].iloc[2]\r\n\t\t\tcurve_label = 'Inst: {i} - Offset: +{v} v = {d} v'.format(i=instrument, v=offset, d=dv)\r\n\t\t\t\r\n\t\t\tax.plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'], label=curve_label)\r\n\t\t\r\n\t\tax.spines['right'].set_visible(False)\r\n\t\tax.spines['top'].set_visible(False)\r\n\t\t\r\n\t\tplt.ylabel('Ave. Aera Per Ion')\r\n\t\tplt.xlabel('Sample Injections')\r\n\t\tplt.title('Tracking Area Per Ion via Detector Measurement\\nOver ~48 Hours of Continuous Sample Acquisition')\r\n\r\n\t\tlegend_h_offset, legend_v_offset = 1.25, 0.75\r\n\t\tplt.legend(loc='center right', bbox_to_anchor=(legend_h_offset, legend_v_offset))\r\n\t\tplt.savefig('DM_API_Analysis', bbox_inches='tight')\r\n\t\tplt.show()\r\n\t\t\r\n\tdef GenericIndividualPlotMaker(self, xdata_lst, ydata_lst, legendlbl_lst, xlbl, ylbl, plot_title, png_filename, legend_h_offset=1.25, legend_v_offset=0.75, legend_location='center'):\r\n\t\t# xdata & ydata: both are a list of lists each containing the corresponding axis data. These are the requirement of these two\r\n\t\t\t# data set to prevent an error:\r\n\t\t\t\t# Sublists with the same index are a matching x vs y set that will be plotted. 
They MUST be the same length to prevent an error.\r\n\t\t\t\t# There must be the same number of sub lists to prevent an error.\r\n\t\t# legendlbl_lst: a list of legend labels for each x vs y plot. Again there must be the same number of items in this list as x/y pairs.\r\n\t\t# The rest are self explainatory\r\n\t\tfig = plt.figure(figsize=(15.5,9))\r\n\t\tax = fig.add_subplot(1,1,1)\r\n\t\t\r\n\t\tfor i in range(len(xdata_lst)):\r\n\t\t\tax.plot(xdata_lst[i], ydata_lst[i], color=self.color_codes[i], label=legendlbl_lst[i])\r\n\t\t\t\r\n\t\tax.spines['right'].set_visible(False)\r\n\t\tax.spines['top'].set_visible(False)\r\n\t\t\r\n\t\tplt.ylabel(ylbl)\r\n\t\tplt.xlabel(xlbl)\r\n\t\tplt.title(plot_title)\r\n\r\n\t\tplt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset, legend_v_offset))\r\n\t\tplt.savefig(png_filename, bbox_inches='tight')\r\n\t\t\r\n\t\t# (x_data, all_y_data, legendlbl_lst, xlbl, plot_titles, figure_title, all_png_filenames)\r\n\tdef GenericCombinedPlotMaker(self, xdata_lst, ydata_lst, legendlbl_lst, xlbl, ylbl_lst, fig_title, png_filename, legend_h_offset=0.9, legend_v_offset=2.4, legend_location='center'):\r\n\t\t# xdata_lst: is a list of lists each containing the corresponding x-axis data. The x-axis data is the same for all ax_n objects\r\n\t\t\t# Generic example: [Series_1_x-axis_data_lst, Series_n_x-axis_data_lst...]\r\n\t\t# ydata_lst: is a list of lists of lists containing all the y-axis data.\r\n\t\t\t# Generic example: [ax_1[Series_1_y-axis_data_lst, Series_n_y-axis_data_lst...], ax_n[ax_1[Series_1_y-axis_data_lst, Series_n_y-axis_data_lst...]...]\t\r\n\t\t\t# data set to prevent an error:\r\n\t\t\t\t# Sublists with the same index are a matching x vs y set that will be plotted. They MUST be the same length to prevent an error.\r\n\t\t\t\t# There must be the same number of sub lists to prevent an error.\r\n\t\t# legendlbl_lst: a list of legend labels for each x vs y plot. 
Again there must be the same number of items in this list as x/y pairs.\r\n\t\t# The rest are self explainatory\r\n\t\tfig = plt.figure(figsize=(25,9))\r\n\t\tax = []\r\n\t\t\r\n\t\tfor a in range(4):\r\n\t\t\tax.append(fig.add_subplot(2,2,1+a))\r\n\t\t\tax[a].set_prop_cycle('color',plt.cm.spectral(np.linspace(0.25,0.84,2))) #@UndefinedVariable\r\n\t\t\t\r\n\t\t\tfor s in range(len(xdata_lst)):\r\n\t\t\t\tax[a].plot(xdata_lst[s], ydata_lst[a][s], label=legendlbl_lst[s])\r\n\t\t\t\tax[a].spines['right'].set_visible(False)\r\n\t\t\t\tax[a].spines['top'].set_visible(False)\r\n\t\t\t\tax[a].set_ylabel(ylbl_lst[a])\r\n\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t\tif (a == 2 or a == 3) and s == 1:\r\n\t\t\t\t\tplt.xlabel(xlbl)\r\n\t\t\t\telif (a == 0 or a == 1) and s == 1:\r\n\t\t\t\t\tax[a].set_xticklabels([])\r\n\t\t\t\t\tax[a].spines['bottom'].set_visible(False)\r\n\t\t\t\t\tax[a].xaxis.set_ticks_position('none')\r\n\t\t\t\t\t\r\n\t\tplt.suptitle(fig_title, fontsize=20)\r\n\t\tplt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset, legend_v_offset))\r\n\t\tplt.savefig(png_filename, bbox_inches='tight')\r\n\t\t\r\n\tdef Manual_OFN20fg_IDL(self):\r\n\t\tfig = plt.figure(figsize=(25,9))\r\n\t\tax = fig.add_subplot(1,1,1)\r\n\t\tax.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.25,0.84,2))) #@UndefinedVariable\r\n\t\t\r\n\t\txdata = [0,150,250,350]\r\n\t\tydata = [[0.036614, 0.009674, 0.0056418, 0.004696],[0.0083151, 0.0044855, 0.0046082, 0.0033099]]\r\n\t\tlegendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\r\n\t\t\r\n\t\tfor s in range(len(ydata)):\r\n\t\t\tax.plot(xdata, ydata[s], label=legendlbl_lst[s])\r\n\t\t\t\r\n\t\tax.spines['right'].set_visible(False)\r\n\t\tax.spines['top'].set_visible(False)\r\n\t\tax.set_ylabel('IDL pg')\r\n\t\tax.set_xlabel('Optimized Detector Voltage Offset (volts)')\r\n\t\tplt.legend()\r\n\t\tplt.suptitle('IDL vs Detector Voltage Offset\\nOFN 0.02 pg On Column\\nQuant Mass = 271.99', fontsize=20)\r\n\t\tplt.savefig('OFN_20fg_IDL_Plot', bbox_inches='tight')\r\n\t\t\r\n\tdef Manual_GO_Plot(self):\r\n\t\tfig = plt.figure(figsize=(25,9))\r\n\t\tax = fig.add_subplot(1,1,1)\r\n\t\tax.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.25,0.84,2))) #@UndefinedVariable\r\n\t\t\r\n\t\txdata = [0,150,250,350]\r\n\t\tydata = [[-7.7, 26.5, 42.8, 66.1],[-8, 4.1, 13.5, 48.4]]\r\n\t\tlegendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\r\n\t\t\r\n\t\tfor s in range(len(ydata)):\r\n\t\t\tax.plot(xdata, ydata[s], label=legendlbl_lst[s])\r\n\t\t\t\r\n\t\tax.spines['right'].set_visible(False)\r\n\t\tax.spines['top'].set_visible(False)\r\n\t\tax.set_ylabel('Change in Optimized Detector Voltage')\r\n\t\tax.set_xlabel('Optimized Detector Voltage Offset (volts)')\r\n\t\tplt.legend()\r\n# \t\tplt.suptitle('Change in Optimized Detector Voltage\\nFrom the Beginning to the End of a Data Set', fontsize=20)\r\n\t\tplt.savefig('GO_Delta_Plot', bbox_inches='tight')\r\n\t\tplt.show()",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
import math


def sieve(n):
    # Sieve of Eratosthenes: sieve[i] == 1 iff i is prime, for 0 <= i <= n
    sieve = [1] * (n+1)
    sieve[1] = 0
    sieve[0] = 0
    for i in range(2, int(math.sqrt(n) + 1)):
        if sieve[i] == 1:
            for j in range(i*i, n + 1, i):
                sieve[j] = 0
    return sieve


def odd_prime(a):
    # True if the decimal digits of a contain none of 3, 5 or 7
    while a != 0:
        y = a % 10
        if y == 3 or y == 5 or y == 7:
            return False
        else:
            a = a // 10
    return True


def main():
    # For each test case, print the sum of the first x primes (up to 75000) whose digits avoid 3, 5 and 7
t = int(input())
for j in range(t):
x = int(input())
n = 75000
arr = sieve(n)
result = []
final = []
sum = 0
for i in range(len(arr)):
if arr[i] == 1:
result.append(i)
for i in range(len(result)):
if (odd_prime(result[i])):
final.append(result[i])
for i in range(x):
sum = sum + final[i]
print(sum)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "60617ff6eda880e5467b3b79d3df13a7147f5990",
"index": 3329,
"step-1": "<mask token>\n\n\ndef sieve(n):\n sieve = [1] * (n + 1)\n sieve[1] = 0\n sieve[0] = 0\n for i in range(2, int(math.sqrt(n) + 1)):\n if sieve[i] == 1:\n for j in range(i * i, n + 1, i):\n sieve[j] = 0\n return sieve\n\n\ndef odd_prime(a):\n while a != 0:\n y = a % 10\n if y == 3 or y == 5 or y == 7:\n return False\n else:\n a = a // 10\n return True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sieve(n):\n sieve = [1] * (n + 1)\n sieve[1] = 0\n sieve[0] = 0\n for i in range(2, int(math.sqrt(n) + 1)):\n if sieve[i] == 1:\n for j in range(i * i, n + 1, i):\n sieve[j] = 0\n return sieve\n\n\ndef odd_prime(a):\n while a != 0:\n y = a % 10\n if y == 3 or y == 5 or y == 7:\n return False\n else:\n a = a // 10\n return True\n\n\ndef main():\n t = int(input())\n for j in range(t):\n x = int(input())\n n = 75000\n arr = sieve(n)\n result = []\n final = []\n sum = 0\n for i in range(len(arr)):\n if arr[i] == 1:\n result.append(i)\n for i in range(len(result)):\n if odd_prime(result[i]):\n final.append(result[i])\n for i in range(x):\n sum = sum + final[i]\n print(sum)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef sieve(n):\n sieve = [1] * (n + 1)\n sieve[1] = 0\n sieve[0] = 0\n for i in range(2, int(math.sqrt(n) + 1)):\n if sieve[i] == 1:\n for j in range(i * i, n + 1, i):\n sieve[j] = 0\n return sieve\n\n\ndef odd_prime(a):\n while a != 0:\n y = a % 10\n if y == 3 or y == 5 or y == 7:\n return False\n else:\n a = a // 10\n return True\n\n\ndef main():\n t = int(input())\n for j in range(t):\n x = int(input())\n n = 75000\n arr = sieve(n)\n result = []\n final = []\n sum = 0\n for i in range(len(arr)):\n if arr[i] == 1:\n result.append(i)\n for i in range(len(result)):\n if odd_prime(result[i]):\n final.append(result[i])\n for i in range(x):\n sum = sum + final[i]\n print(sum)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import math\n\n\ndef sieve(n):\n sieve = [1] * (n + 1)\n sieve[1] = 0\n sieve[0] = 0\n for i in range(2, int(math.sqrt(n) + 1)):\n if sieve[i] == 1:\n for j in range(i * i, n + 1, i):\n sieve[j] = 0\n return sieve\n\n\ndef odd_prime(a):\n while a != 0:\n y = a % 10\n if y == 3 or y == 5 or y == 7:\n return False\n else:\n a = a // 10\n return True\n\n\ndef main():\n t = int(input())\n for j in range(t):\n x = int(input())\n n = 75000\n arr = sieve(n)\n result = []\n final = []\n sum = 0\n for i in range(len(arr)):\n if arr[i] == 1:\n result.append(i)\n for i in range(len(result)):\n if odd_prime(result[i]):\n final.append(result[i])\n for i in range(x):\n sum = sum + final[i]\n print(sum)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import math\n\n\ndef sieve(n):\n sieve = [1] * (n+1)\n sieve[1] = 0\n sieve[0] = 0\n for i in range(2, int(math.sqrt(n) + 1)):\n if sieve[i] == 1:\n for j in range(i*i, n + 1, i):\n sieve[j] = 0\n return sieve\ndef odd_prime(a):\n while a != 0:\n y = a % 10\n if y == 3 or y == 5 or y ==7:\n return False\n else:\n a = a // 10\n return True\n\ndef main():\n t = int(input())\n for j in range(t):\n x = int(input())\n n = 75000\n arr = sieve(n)\n result = []\n final = []\n sum = 0\n for i in range(len(arr)):\n if arr[i] == 1:\n result.append(i)\n for i in range(len(result)):\n if (odd_prime(result[i])):\n final.append(result[i])\n for i in range(x):\n sum = sum + final[i]\n print(sum)\n\n\nif __name__ == '__main__':\n main()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
class Foo:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __setitem__(self, key, value):
print(key, value)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Foo:
def __init__(self, name, age):
self.name = name
self.age = age
<|reserved_special_token_0|>
def __setitem__(self, key, value):
print(key, value)
def __delitem__(self, key):
print(key)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Foo:
def __init__(self, name, age):
self.name = name
self.age = age
def __getitem__(self, item):
return item + 10
def __setitem__(self, key, value):
print(key, value)
def __delitem__(self, key):
print(key)
<|reserved_special_token_0|>
print(result)
<|reserved_special_token_0|>
del obj[222]
<|reserved_special_token_1|>
class Foo:
def __init__(self, name, age):
self.name = name
self.age = age
def __getitem__(self, item):
return item + 10
def __setitem__(self, key, value):
print(key, value)
def __delitem__(self, key):
print(key)
obj = Foo('stnley', 25)
result = obj[555]
print(result)
obj[111] = 444
del obj[222]
<|reserved_special_token_1|>
# __author__: Stanley
# date: 2018/10/22
class Foo:
def __init__(self, name, age):
self.name = name
self.age = age
def __getitem__(self, item):
return item + 10
def __setitem__(self, key, value):
print(key, value)
def __delitem__(self, key):
print(key)
obj = Foo("stnley", 25)
# Automatically invokes the __getitem__ method of obj's class; 555 is passed as the argument
result = obj[555]
print(result)
obj[111] = 444
del obj[222]
|
flexible
|
{
"blob_id": "d4b9403366a16dfbb12a2161a996e641b3a785a5",
"index": 8027,
"step-1": "class Foo:\n <mask token>\n <mask token>\n\n def __setitem__(self, key, value):\n print(key, value)\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Foo:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n <mask token>\n\n def __setitem__(self, key, value):\n print(key, value)\n\n def __delitem__(self, key):\n print(key)\n\n\n<mask token>\n",
"step-3": "class Foo:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def __getitem__(self, item):\n return item + 10\n\n def __setitem__(self, key, value):\n print(key, value)\n\n def __delitem__(self, key):\n print(key)\n\n\n<mask token>\nprint(result)\n<mask token>\ndel obj[222]\n",
"step-4": "class Foo:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def __getitem__(self, item):\n return item + 10\n\n def __setitem__(self, key, value):\n print(key, value)\n\n def __delitem__(self, key):\n print(key)\n\n\nobj = Foo('stnley', 25)\nresult = obj[555]\nprint(result)\nobj[111] = 444\ndel obj[222]\n",
"step-5": "# __author__: Stanley\n# date: 2018/10/22\n\nclass Foo:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def __getitem__(self, item):\n return item + 10\n\n def __setitem__(self, key, value):\n print(key, value)\n\n def __delitem__(self, key):\n print(key)\n\n\nobj = Foo(\"stnley\", 25)\n# 自动执行obj对象的类中的__getitem__方法。555当作参数传递\nresult = obj[555]\nprint(result)\nobj[111] = 444\ndel obj[222]\n\n",
"step-ids": [
2,
4,
6,
7,
8
]
}
|
[
2,
4,
6,
7,
8
] |
from base import *
try:
from .prod_local import *
except:
pass
# we currently don't have an interface that allows an administrator
# to create a repository for another user. Until we have added this
# capability, allow users to create repos.
ELEMENTARY_ALLOW_REPO_CREATION = True
|
normal
|
{
"blob_id": "709271b98fc2b40c763522c54488be36968f02d8",
"index": 346,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n from .prod_local import *\nexcept:\n pass\n<mask token>\n",
"step-3": "<mask token>\ntry:\n from .prod_local import *\nexcept:\n pass\nELEMENTARY_ALLOW_REPO_CREATION = True\n",
"step-4": "from base import *\ntry:\n from .prod_local import *\nexcept:\n pass\nELEMENTARY_ALLOW_REPO_CREATION = True\n",
"step-5": "from base import *\n\ntry:\n from .prod_local import *\nexcept:\n pass\n\n# we currently don't have an interface that allows an administrator\n# to create a repository for another user. Until we have added this\n# capability, allow users to create repos.\nELEMENTARY_ALLOW_REPO_CREATION = True \n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_vmware import vim_util
def build_recursive_traversal_spec(client_factory):
# Recurse through all ResourcePools
rp_to_rp = client_factory.create('ns0:TraversalSpec')
rp_to_rp.name = 'rpToRp'
rp_to_rp.type = 'ResourcePool'
rp_to_rp.path = 'resourcePool'
rp_to_rp.skip = False
rp_to_vm = client_factory.create('ns0:TraversalSpec')
rp_to_vm.name = 'rpToVm'
rp_to_vm.type = 'ResourcePool'
rp_to_vm.path = 'vm'
rp_to_vm.skip = False
spec_array_resource_pool = [client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec')]
spec_array_resource_pool[0].name = 'rpToRp'
spec_array_resource_pool[1].name = 'rpToVm'
rp_to_rp.selectSet = spec_array_resource_pool
# Traversal through resource pool branch
cr_to_rp = client_factory.create('ns0:TraversalSpec')
cr_to_rp.name = 'crToRp'
cr_to_rp.type = 'ComputeResource'
cr_to_rp.path = 'resourcePool'
cr_to_rp.skip = False
spec_array_compute_resource = [client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec')]
spec_array_compute_resource[0].name = 'rpToRp'
spec_array_compute_resource[1].name = 'rpToVm'
cr_to_rp.selectSet = spec_array_compute_resource
# Traversal through host branch
cr_to_h = client_factory.create('ns0:TraversalSpec')
cr_to_h.name = 'crToH'
cr_to_h.type = 'ComputeResource'
cr_to_h.path = 'host'
cr_to_h.skip = False
# Traversal through hostFolder branch
dc_to_hf = client_factory.create('ns0:TraversalSpec')
dc_to_hf.name = 'dcToHf'
dc_to_hf.type = 'Datacenter'
dc_to_hf.path = 'hostFolder'
dc_to_hf.skip = False
spec_array_datacenter_host = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_host[0].name = 'visitFolders'
dc_to_hf.selectSet = spec_array_datacenter_host
# Traversal through vmFolder branch
dc_to_vmf = client_factory.create('ns0:TraversalSpec')
dc_to_vmf.name = 'dcToVmf'
dc_to_vmf.type = 'Datacenter'
dc_to_vmf.path = 'vmFolder'
dc_to_vmf.skip = False
spec_array_datacenter_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_vm[0].name = 'visitFolders'
dc_to_vmf.selectSet = spec_array_datacenter_vm
# Traversal through datastore branch
dc_to_ds = client_factory.create('ns0:TraversalSpec')
dc_to_ds.name = 'dcToDs'
dc_to_ds.type = 'Datacenter'
dc_to_ds.path = 'datastore'
dc_to_ds.skip = False
spec_array_datacenter_ds = [client_factory.create('ns0:SelectionSpec')]
spec_array_datacenter_ds[0].name = 'visitFolders'
dc_to_ds.selectSet = spec_array_datacenter_ds
# Recurse through all hosts
h_to_vm = client_factory.create('ns0:TraversalSpec')
h_to_vm.name = 'hToVm'
h_to_vm.type = 'HostSystem'
h_to_vm.path = 'vm'
h_to_vm.skip = False
spec_array_host_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_host_vm[0].name = 'visitFolders'
h_to_vm.selectSet = spec_array_host_vm
# Recurse through all datastores
ds_to_vm = client_factory.create('ns0:TraversalSpec')
ds_to_vm.name = 'dsToVm'
ds_to_vm.type = 'Datastore'
ds_to_vm.path = 'vm'
ds_to_vm.skip = False
spec_array_datastore_vm = [client_factory.create('ns0:SelectionSpec')]
spec_array_datastore_vm[0].name = 'visitFolders'
ds_to_vm.selectSet = spec_array_datastore_vm
# Recurse through the folders
visit_folders = client_factory.create('ns0:TraversalSpec')
visit_folders.name = 'visitFolders'
visit_folders.type = 'Folder'
visit_folders.path = 'childEntity'
visit_folders.skip = False
spec_array_visit_folders = [client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec'),
client_factory.create('ns0:SelectionSpec')]
spec_array_visit_folders[0].name = 'visitFolders'
spec_array_visit_folders[1].name = 'dcToHf'
spec_array_visit_folders[2].name = 'dcToVmf'
spec_array_visit_folders[3].name = 'crToH'
spec_array_visit_folders[4].name = 'crToRp'
spec_array_visit_folders[5].name = 'dcToDs'
spec_array_visit_folders[6].name = 'hToVm'
spec_array_visit_folders[7].name = 'dsToVm'
spec_array_visit_folders[8].name = 'rpToVm'
visit_folders.selectSet = spec_array_visit_folders
# Add all of them here
spec_array = [visit_folders, dc_to_vmf, dc_to_ds, dc_to_hf, cr_to_h,
cr_to_rp, rp_to_rp, h_to_vm, ds_to_vm, rp_to_vm]
return spec_array
def get_object_properties(vim, collector, mobj, type, properties):
"""Gets the properties of the Managed object specified."""
client_factory = vim.client.factory
if mobj is None:
return None
usecoll = collector
if usecoll is None:
usecoll = vim.service_content.propertyCollector
property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
property_spec = client_factory.create('ns0:PropertySpec')
property_spec.all = (properties is None or len(properties) == 0)
property_spec.pathSet = properties
property_spec.type = type
object_spec = client_factory.create('ns0:ObjectSpec')
object_spec.obj = mobj
object_spec.skip = False
property_filter_spec.propSet = [property_spec]
property_filter_spec.objectSet = [object_spec]
return retrieve_properties_ex(vim,
usecoll,
[property_filter_spec])
def get_dynamic_property(vim, mobj, type, property_name):
"""Gets a particular property of the Managed Object."""
properties = get_dynamic_properties(vim, mobj, [property_name], type)
property_value = None
if property_name in properties:
property_value = properties.get(property_name)
return property_value
def get_dynamic_properties(vim, mobj, property_names, obj_type=None):
"""Gets specific properties of the Managed Object."""
if not obj_type:
obj_type = mobj._type
obj_content = get_object_properties(
vim, None, mobj, obj_type, property_names)
properties = {}
if obj_content:
dynamic_properties = obj_content[0].propSet
for dynamic_property in dynamic_properties:
property_name = dynamic_property.name
property_value = dynamic_property.val
properties[property_name] = property_value
return properties
def retrieve_properties_ex(vim, prop_coll, spec_set, max_count=500):
"""Retrieve properties.
Retrieve properties using PropertyCollector.RetrievePropertiesEx
and PropertyCollector.ContinueRetrievePropertiesEx
args:
:param vim: Vim object
:param prop_coll: PropertyCollector MOR
:param max_count: Max num of objects returned in a single call.
"""
objcont = []
client_factory = vim.client.factory
opts = client_factory.create('ns0:RetrieveOptions')
opts.maxObjects = max_count
res = vim.RetrievePropertiesEx(prop_coll,
specSet=spec_set,
options=opts)
while True:
if res and res.objects:
objcont.extend(res.objects)
if hasattr(res, "token") and res.token:
res = vim.ContinueRetrievePropertiesEx(prop_coll, token=res.token)
else:
break
return objcont
def get_objects(vim, type, properties_to_collect=None, all=False):
"""Gets the list of objects of the type specified."""
if not properties_to_collect:
properties_to_collect = ["name"]
client_factory = vim.client.factory
trav_spec = vim_util.build_recursive_traversal_spec(client_factory)
object_spec = vim_util.build_object_spec(client_factory,
vim.service_content.rootFolder,
[trav_spec])
property_spec = vim_util.build_property_spec(
client_factory, type_=type,
properties_to_collect=properties_to_collect,
all_properties=all)
property_filter_spec = vim_util.build_property_filter_spec(client_factory,
[property_spec],
[object_spec])
property_collector = vim.service_content.propertyCollector
return retrieve_properties_ex(vim,
property_collector,
[property_filter_spec])
def get_prop_spec(client_factory, spec_type, properties):
"""Builds the Property Spec Object."""
prop_spec = client_factory.create('ns0:PropertySpec')
prop_spec.type = spec_type
prop_spec.pathSet = properties
return prop_spec
def get_obj_spec(client_factory, obj, select_set=None):
"""Builds the Object Spec object."""
obj_spec = client_factory.create('ns0:ObjectSpec')
obj_spec.obj = obj
obj_spec.skip = False
if select_set is not None:
obj_spec.selectSet = select_set
return obj_spec
def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
"""Builds the Property Filter Spec Object."""
prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
prop_filter_spec.propSet = prop_spec
prop_filter_spec.objectSet = obj_spec
return prop_filter_spec
def get_property_filter_specs(vim, property_dict, objects=None):
client_factory = vim.client.factory
object_specs = []
if not objects:
objects = [vim.service_content.rootFolder]
for obj in objects:
if obj.value == get_root_folder_id(vim):
traversal_spec = [
vim_util.build_recursive_traversal_spec(client_factory)]
else:
traversal_spec = build_recursive_traversal_spec(client_factory)
object_spec = vim_util.build_object_spec(client_factory,
obj,
traversal_spec)
object_specs.append(object_spec)
property_specs = []
for obj_type in property_dict:
props = property_dict[obj_type]
property_spec = vim_util.build_property_spec(
client_factory, type_=obj_type, properties_to_collect=props)
property_specs.append(property_spec)
property_filter_spec = vim_util.build_property_filter_spec(client_factory,
property_specs,
object_specs)
return property_filter_spec
def create_filter(vim, prop_filter_spec, collector=None):
if not collector:
collector = vim.service_content.propertyCollector
return vim.CreateFilter(collector,
spec=prop_filter_spec,
partialUpdates=False)
def create_property_collector(vim, collector=None):
if not collector:
collector = vim.service_content.propertyCollector
return vim.CreatePropertyCollector(collector)
def destroy_property_collector(vim, collector):
if collector:
return vim.DestroyPropertyCollector(collector)
def wait_for_updates_ex(vim, version, collector=None,
max_wait=85, max_update_count=-1):
"""Polling mechanism for property collection
args:
:param vim: Vim object
:param version: version string
:param collector: PropertyCollector MOR
:param max_wait: Max time in seconds before the call returns
(Default set to 85 as 90 is the http socket timeout)
:param max_update_count: Max num of ObjectUpdates returned
in a single call. Not set if <= 0
"""
client_factory = vim.client.factory
waitopts = client_factory.create('ns0:WaitOptions')
waitopts.maxWaitSeconds = max_wait
if max_update_count > 0:
waitopts.maxObjectUpdates = max_update_count
if not collector:
collector = vim.service_content.propertyCollector
return vim.WaitForUpdatesEx(collector,
version=version,
options=waitopts)
def cancel_wait_for_updates(vim, collector=None):
if not collector:
collector = vim.service_content.propertyCollector
return vim.CancelWaitForUpdates(collector)
def get_properties_for_a_collection_of_objects(vim, type,
obj_list, properties):
"""Gets the list of properties for the collection of objects."""
client_factory = vim.client.factory
if len(obj_list) == 0:
return []
prop_spec = get_prop_spec(client_factory, type, properties)
lst_obj_specs = []
for obj in obj_list:
lst_obj_specs.append(get_obj_spec(client_factory, obj))
prop_filter_spec = get_prop_filter_spec(client_factory,
lst_obj_specs, [prop_spec])
return retrieve_properties_ex(vim,
vim.service_content.propertyCollector,
[prop_filter_spec])
def get_search_index(vim):
return vim.service_content.searchIndex
def find_by_inventory_path(vim, search_index, path):
return vim.FindByInventoryPath(search_index, inventoryPath=path)
def get_root_folder_id(vim):
return vim.service_content.rootFolder.value
def get_dv_switch_manager(vim):
"""Get reference of DistributedVirtualSwitchManager."""
return vim.service_content.dvSwitchManager
def get_dvs_mor_by_uuid(vim, uuid):
"""Query DVS by UUID."""
dvs_mgr = get_dv_switch_manager(vim)
return vim.QueryDvsByUuid(dvs_mgr, uuid=uuid)
|
normal
|
{
"blob_id": "de704bffe2e23a8a83d34204e325b7fb2454ef66",
"index": 133,
"step-1": "<mask token>\n\n\ndef build_recursive_traversal_spec(client_factory):\n rp_to_rp = client_factory.create('ns0:TraversalSpec')\n rp_to_rp.name = 'rpToRp'\n rp_to_rp.type = 'ResourcePool'\n rp_to_rp.path = 'resourcePool'\n rp_to_rp.skip = False\n rp_to_vm = client_factory.create('ns0:TraversalSpec')\n rp_to_vm.name = 'rpToVm'\n rp_to_vm.type = 'ResourcePool'\n rp_to_vm.path = 'vm'\n rp_to_vm.skip = False\n spec_array_resource_pool = [client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec')]\n spec_array_resource_pool[0].name = 'rpToRp'\n spec_array_resource_pool[1].name = 'rpToVm'\n rp_to_rp.selectSet = spec_array_resource_pool\n cr_to_rp = client_factory.create('ns0:TraversalSpec')\n cr_to_rp.name = 'crToRp'\n cr_to_rp.type = 'ComputeResource'\n cr_to_rp.path = 'resourcePool'\n cr_to_rp.skip = False\n spec_array_compute_resource = [client_factory.create(\n 'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec')]\n spec_array_compute_resource[0].name = 'rpToRp'\n spec_array_compute_resource[1].name = 'rpToVm'\n cr_to_rp.selectSet = spec_array_compute_resource\n cr_to_h = client_factory.create('ns0:TraversalSpec')\n cr_to_h.name = 'crToH'\n cr_to_h.type = 'ComputeResource'\n cr_to_h.path = 'host'\n cr_to_h.skip = False\n dc_to_hf = client_factory.create('ns0:TraversalSpec')\n dc_to_hf.name = 'dcToHf'\n dc_to_hf.type = 'Datacenter'\n dc_to_hf.path = 'hostFolder'\n dc_to_hf.skip = False\n spec_array_datacenter_host = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datacenter_host[0].name = 'visitFolders'\n dc_to_hf.selectSet = spec_array_datacenter_host\n dc_to_vmf = client_factory.create('ns0:TraversalSpec')\n dc_to_vmf.name = 'dcToVmf'\n dc_to_vmf.type = 'Datacenter'\n dc_to_vmf.path = 'vmFolder'\n dc_to_vmf.skip = False\n spec_array_datacenter_vm = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datacenter_vm[0].name = 'visitFolders'\n dc_to_vmf.selectSet = spec_array_datacenter_vm\n dc_to_ds = client_factory.create('ns0:TraversalSpec')\n dc_to_ds.name = 'dcToDs'\n dc_to_ds.type = 'Datacenter'\n dc_to_ds.path = 'datastore'\n dc_to_ds.skip = False\n spec_array_datacenter_ds = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datacenter_ds[0].name = 'visitFolders'\n dc_to_ds.selectSet = spec_array_datacenter_ds\n h_to_vm = client_factory.create('ns0:TraversalSpec')\n h_to_vm.name = 'hToVm'\n h_to_vm.type = 'HostSystem'\n h_to_vm.path = 'vm'\n h_to_vm.skip = False\n spec_array_host_vm = [client_factory.create('ns0:SelectionSpec')]\n spec_array_host_vm[0].name = 'visitFolders'\n h_to_vm.selectSet = spec_array_host_vm\n ds_to_vm = client_factory.create('ns0:TraversalSpec')\n ds_to_vm.name = 'dsToVm'\n ds_to_vm.type = 'Datastore'\n ds_to_vm.path = 'vm'\n ds_to_vm.skip = False\n spec_array_datastore_vm = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datastore_vm[0].name = 'visitFolders'\n ds_to_vm.selectSet = spec_array_datastore_vm\n visit_folders = client_factory.create('ns0:TraversalSpec')\n visit_folders.name = 'visitFolders'\n visit_folders.type = 'Folder'\n visit_folders.path = 'childEntity'\n visit_folders.skip = False\n spec_array_visit_folders = [client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec'), client_factory.create(\n 'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec'), client_factory.create(\n 'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec'),\n 
client_factory.create('ns0:SelectionSpec'), client_factory.create(\n 'ns0:SelectionSpec')]\n spec_array_visit_folders[0].name = 'visitFolders'\n spec_array_visit_folders[1].name = 'dcToHf'\n spec_array_visit_folders[2].name = 'dcToVmf'\n spec_array_visit_folders[3].name = 'crToH'\n spec_array_visit_folders[4].name = 'crToRp'\n spec_array_visit_folders[5].name = 'dcToDs'\n spec_array_visit_folders[6].name = 'hToVm'\n spec_array_visit_folders[7].name = 'dsToVm'\n spec_array_visit_folders[8].name = 'rpToVm'\n visit_folders.selectSet = spec_array_visit_folders\n spec_array = [visit_folders, dc_to_vmf, dc_to_ds, dc_to_hf, cr_to_h,\n cr_to_rp, rp_to_rp, h_to_vm, ds_to_vm, rp_to_vm]\n return spec_array\n\n\ndef get_object_properties(vim, collector, mobj, type, properties):\n \"\"\"Gets the properties of the Managed object specified.\"\"\"\n client_factory = vim.client.factory\n if mobj is None:\n return None\n usecoll = collector\n if usecoll is None:\n usecoll = vim.service_content.propertyCollector\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n property_spec = client_factory.create('ns0:PropertySpec')\n property_spec.all = properties is None or len(properties) == 0\n property_spec.pathSet = properties\n property_spec.type = type\n object_spec = client_factory.create('ns0:ObjectSpec')\n object_spec.obj = mobj\n object_spec.skip = False\n property_filter_spec.propSet = [property_spec]\n property_filter_spec.objectSet = [object_spec]\n return retrieve_properties_ex(vim, usecoll, [property_filter_spec])\n\n\ndef get_dynamic_property(vim, mobj, type, property_name):\n \"\"\"Gets a particular property of the Managed Object.\"\"\"\n properties = get_dynamic_properties(vim, mobj, [property_name], type)\n property_value = None\n if property_name in properties:\n property_value = properties.get(property_name)\n return property_value\n\n\n<mask token>\n\n\ndef get_prop_filter_spec(client_factory, obj_spec, prop_spec):\n \"\"\"Builds the Property Filter Spec Object.\"\"\"\n prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n prop_filter_spec.propSet = prop_spec\n prop_filter_spec.objectSet = obj_spec\n return prop_filter_spec\n\n\ndef get_property_filter_specs(vim, property_dict, objects=None):\n client_factory = vim.client.factory\n object_specs = []\n if not objects:\n objects = [vim.service_content.rootFolder]\n for obj in objects:\n if obj.value == get_root_folder_id(vim):\n traversal_spec = [vim_util.build_recursive_traversal_spec(\n client_factory)]\n else:\n traversal_spec = build_recursive_traversal_spec(client_factory)\n object_spec = vim_util.build_object_spec(client_factory, obj,\n traversal_spec)\n object_specs.append(object_spec)\n property_specs = []\n for obj_type in property_dict:\n props = property_dict[obj_type]\n property_spec = vim_util.build_property_spec(client_factory, type_=\n obj_type, properties_to_collect=props)\n property_specs.append(property_spec)\n property_filter_spec = vim_util.build_property_filter_spec(client_factory,\n property_specs, object_specs)\n return property_filter_spec\n\n\ndef create_filter(vim, prop_filter_spec, collector=None):\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.CreateFilter(collector, spec=prop_filter_spec,\n partialUpdates=False)\n\n\ndef create_property_collector(vim, collector=None):\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.CreatePropertyCollector(collector)\n\n\n<mask token>\n\n\ndef wait_for_updates_ex(vim, version, 
collector=None, max_wait=85,\n max_update_count=-1):\n \"\"\"Polling mechanism for property collection\n\n args:\n :param vim: Vim object\n :param version: version string\n :param collector: PropertyCollector MOR\n :param max_wait: Max time in seconds before the call returns\n (Default set to 85 as 90 is the http socket timeout)\n :param max_update_count: Max num of ObjectUpdates returned\n in a single call. Not set if <= 0\n \"\"\"\n client_factory = vim.client.factory\n waitopts = client_factory.create('ns0:WaitOptions')\n waitopts.maxWaitSeconds = max_wait\n if max_update_count > 0:\n waitopts.maxObjectUpdates = max_update_count\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.WaitForUpdatesEx(collector, version=version, options=waitopts)\n\n\ndef cancel_wait_for_updates(vim, collector=None):\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.CancelWaitForUpdates(collector)\n\n\ndef get_properties_for_a_collection_of_objects(vim, type, obj_list, properties\n ):\n \"\"\"Gets the list of properties for the collection of objects.\"\"\"\n client_factory = vim.client.factory\n if len(obj_list) == 0:\n return []\n prop_spec = get_prop_spec(client_factory, type, properties)\n lst_obj_specs = []\n for obj in obj_list:\n lst_obj_specs.append(get_obj_spec(client_factory, obj))\n prop_filter_spec = get_prop_filter_spec(client_factory, lst_obj_specs,\n [prop_spec])\n return retrieve_properties_ex(vim, vim.service_content.\n propertyCollector, [prop_filter_spec])\n\n\n<mask token>\n\n\ndef find_by_inventory_path(vim, search_index, path):\n return vim.FindByInventoryPath(search_index, inventoryPath=path)\n\n\ndef get_root_folder_id(vim):\n return vim.service_content.rootFolder.value\n\n\ndef get_dv_switch_manager(vim):\n \"\"\"Get reference of DistributedVirtualSwitchManager.\"\"\"\n return vim.service_content.dvSwitchManager\n\n\ndef get_dvs_mor_by_uuid(vim, uuid):\n \"\"\"Query DVS by UUID.\"\"\"\n dvs_mgr = get_dv_switch_manager(vim)\n return vim.QueryDvsByUuid(dvs_mgr, uuid=uuid)\n",
"step-2": "<mask token>\n\n\ndef build_recursive_traversal_spec(client_factory):\n rp_to_rp = client_factory.create('ns0:TraversalSpec')\n rp_to_rp.name = 'rpToRp'\n rp_to_rp.type = 'ResourcePool'\n rp_to_rp.path = 'resourcePool'\n rp_to_rp.skip = False\n rp_to_vm = client_factory.create('ns0:TraversalSpec')\n rp_to_vm.name = 'rpToVm'\n rp_to_vm.type = 'ResourcePool'\n rp_to_vm.path = 'vm'\n rp_to_vm.skip = False\n spec_array_resource_pool = [client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec')]\n spec_array_resource_pool[0].name = 'rpToRp'\n spec_array_resource_pool[1].name = 'rpToVm'\n rp_to_rp.selectSet = spec_array_resource_pool\n cr_to_rp = client_factory.create('ns0:TraversalSpec')\n cr_to_rp.name = 'crToRp'\n cr_to_rp.type = 'ComputeResource'\n cr_to_rp.path = 'resourcePool'\n cr_to_rp.skip = False\n spec_array_compute_resource = [client_factory.create(\n 'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec')]\n spec_array_compute_resource[0].name = 'rpToRp'\n spec_array_compute_resource[1].name = 'rpToVm'\n cr_to_rp.selectSet = spec_array_compute_resource\n cr_to_h = client_factory.create('ns0:TraversalSpec')\n cr_to_h.name = 'crToH'\n cr_to_h.type = 'ComputeResource'\n cr_to_h.path = 'host'\n cr_to_h.skip = False\n dc_to_hf = client_factory.create('ns0:TraversalSpec')\n dc_to_hf.name = 'dcToHf'\n dc_to_hf.type = 'Datacenter'\n dc_to_hf.path = 'hostFolder'\n dc_to_hf.skip = False\n spec_array_datacenter_host = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datacenter_host[0].name = 'visitFolders'\n dc_to_hf.selectSet = spec_array_datacenter_host\n dc_to_vmf = client_factory.create('ns0:TraversalSpec')\n dc_to_vmf.name = 'dcToVmf'\n dc_to_vmf.type = 'Datacenter'\n dc_to_vmf.path = 'vmFolder'\n dc_to_vmf.skip = False\n spec_array_datacenter_vm = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datacenter_vm[0].name = 'visitFolders'\n dc_to_vmf.selectSet = spec_array_datacenter_vm\n dc_to_ds = client_factory.create('ns0:TraversalSpec')\n dc_to_ds.name = 'dcToDs'\n dc_to_ds.type = 'Datacenter'\n dc_to_ds.path = 'datastore'\n dc_to_ds.skip = False\n spec_array_datacenter_ds = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datacenter_ds[0].name = 'visitFolders'\n dc_to_ds.selectSet = spec_array_datacenter_ds\n h_to_vm = client_factory.create('ns0:TraversalSpec')\n h_to_vm.name = 'hToVm'\n h_to_vm.type = 'HostSystem'\n h_to_vm.path = 'vm'\n h_to_vm.skip = False\n spec_array_host_vm = [client_factory.create('ns0:SelectionSpec')]\n spec_array_host_vm[0].name = 'visitFolders'\n h_to_vm.selectSet = spec_array_host_vm\n ds_to_vm = client_factory.create('ns0:TraversalSpec')\n ds_to_vm.name = 'dsToVm'\n ds_to_vm.type = 'Datastore'\n ds_to_vm.path = 'vm'\n ds_to_vm.skip = False\n spec_array_datastore_vm = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datastore_vm[0].name = 'visitFolders'\n ds_to_vm.selectSet = spec_array_datastore_vm\n visit_folders = client_factory.create('ns0:TraversalSpec')\n visit_folders.name = 'visitFolders'\n visit_folders.type = 'Folder'\n visit_folders.path = 'childEntity'\n visit_folders.skip = False\n spec_array_visit_folders = [client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec'), client_factory.create(\n 'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec'), client_factory.create(\n 'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec'),\n 
client_factory.create('ns0:SelectionSpec'), client_factory.create(\n 'ns0:SelectionSpec')]\n spec_array_visit_folders[0].name = 'visitFolders'\n spec_array_visit_folders[1].name = 'dcToHf'\n spec_array_visit_folders[2].name = 'dcToVmf'\n spec_array_visit_folders[3].name = 'crToH'\n spec_array_visit_folders[4].name = 'crToRp'\n spec_array_visit_folders[5].name = 'dcToDs'\n spec_array_visit_folders[6].name = 'hToVm'\n spec_array_visit_folders[7].name = 'dsToVm'\n spec_array_visit_folders[8].name = 'rpToVm'\n visit_folders.selectSet = spec_array_visit_folders\n spec_array = [visit_folders, dc_to_vmf, dc_to_ds, dc_to_hf, cr_to_h,\n cr_to_rp, rp_to_rp, h_to_vm, ds_to_vm, rp_to_vm]\n return spec_array\n\n\ndef get_object_properties(vim, collector, mobj, type, properties):\n \"\"\"Gets the properties of the Managed object specified.\"\"\"\n client_factory = vim.client.factory\n if mobj is None:\n return None\n usecoll = collector\n if usecoll is None:\n usecoll = vim.service_content.propertyCollector\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n property_spec = client_factory.create('ns0:PropertySpec')\n property_spec.all = properties is None or len(properties) == 0\n property_spec.pathSet = properties\n property_spec.type = type\n object_spec = client_factory.create('ns0:ObjectSpec')\n object_spec.obj = mobj\n object_spec.skip = False\n property_filter_spec.propSet = [property_spec]\n property_filter_spec.objectSet = [object_spec]\n return retrieve_properties_ex(vim, usecoll, [property_filter_spec])\n\n\ndef get_dynamic_property(vim, mobj, type, property_name):\n \"\"\"Gets a particular property of the Managed Object.\"\"\"\n properties = get_dynamic_properties(vim, mobj, [property_name], type)\n property_value = None\n if property_name in properties:\n property_value = properties.get(property_name)\n return property_value\n\n\n<mask token>\n\n\ndef get_prop_spec(client_factory, spec_type, properties):\n \"\"\"Builds the Property Spec Object.\"\"\"\n prop_spec = client_factory.create('ns0:PropertySpec')\n prop_spec.type = spec_type\n prop_spec.pathSet = properties\n return prop_spec\n\n\ndef get_obj_spec(client_factory, obj, select_set=None):\n \"\"\"Builds the Object Spec object.\"\"\"\n obj_spec = client_factory.create('ns0:ObjectSpec')\n obj_spec.obj = obj\n obj_spec.skip = False\n if select_set is not None:\n obj_spec.selectSet = select_set\n return obj_spec\n\n\ndef get_prop_filter_spec(client_factory, obj_spec, prop_spec):\n \"\"\"Builds the Property Filter Spec Object.\"\"\"\n prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n prop_filter_spec.propSet = prop_spec\n prop_filter_spec.objectSet = obj_spec\n return prop_filter_spec\n\n\ndef get_property_filter_specs(vim, property_dict, objects=None):\n client_factory = vim.client.factory\n object_specs = []\n if not objects:\n objects = [vim.service_content.rootFolder]\n for obj in objects:\n if obj.value == get_root_folder_id(vim):\n traversal_spec = [vim_util.build_recursive_traversal_spec(\n client_factory)]\n else:\n traversal_spec = build_recursive_traversal_spec(client_factory)\n object_spec = vim_util.build_object_spec(client_factory, obj,\n traversal_spec)\n object_specs.append(object_spec)\n property_specs = []\n for obj_type in property_dict:\n props = property_dict[obj_type]\n property_spec = vim_util.build_property_spec(client_factory, type_=\n obj_type, properties_to_collect=props)\n property_specs.append(property_spec)\n property_filter_spec = 
vim_util.build_property_filter_spec(client_factory,\n property_specs, object_specs)\n return property_filter_spec\n\n\ndef create_filter(vim, prop_filter_spec, collector=None):\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.CreateFilter(collector, spec=prop_filter_spec,\n partialUpdates=False)\n\n\ndef create_property_collector(vim, collector=None):\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.CreatePropertyCollector(collector)\n\n\ndef destroy_property_collector(vim, collector):\n if collector:\n return vim.DestroyPropertyCollector(collector)\n\n\ndef wait_for_updates_ex(vim, version, collector=None, max_wait=85,\n max_update_count=-1):\n \"\"\"Polling mechanism for property collection\n\n args:\n :param vim: Vim object\n :param version: version string\n :param collector: PropertyCollector MOR\n :param max_wait: Max time in seconds before the call returns\n (Default set to 85 as 90 is the http socket timeout)\n :param max_update_count: Max num of ObjectUpdates returned\n in a single call. Not set if <= 0\n \"\"\"\n client_factory = vim.client.factory\n waitopts = client_factory.create('ns0:WaitOptions')\n waitopts.maxWaitSeconds = max_wait\n if max_update_count > 0:\n waitopts.maxObjectUpdates = max_update_count\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.WaitForUpdatesEx(collector, version=version, options=waitopts)\n\n\ndef cancel_wait_for_updates(vim, collector=None):\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.CancelWaitForUpdates(collector)\n\n\ndef get_properties_for_a_collection_of_objects(vim, type, obj_list, properties\n ):\n \"\"\"Gets the list of properties for the collection of objects.\"\"\"\n client_factory = vim.client.factory\n if len(obj_list) == 0:\n return []\n prop_spec = get_prop_spec(client_factory, type, properties)\n lst_obj_specs = []\n for obj in obj_list:\n lst_obj_specs.append(get_obj_spec(client_factory, obj))\n prop_filter_spec = get_prop_filter_spec(client_factory, lst_obj_specs,\n [prop_spec])\n return retrieve_properties_ex(vim, vim.service_content.\n propertyCollector, [prop_filter_spec])\n\n\ndef get_search_index(vim):\n return vim.service_content.searchIndex\n\n\ndef find_by_inventory_path(vim, search_index, path):\n return vim.FindByInventoryPath(search_index, inventoryPath=path)\n\n\ndef get_root_folder_id(vim):\n return vim.service_content.rootFolder.value\n\n\ndef get_dv_switch_manager(vim):\n \"\"\"Get reference of DistributedVirtualSwitchManager.\"\"\"\n return vim.service_content.dvSwitchManager\n\n\ndef get_dvs_mor_by_uuid(vim, uuid):\n \"\"\"Query DVS by UUID.\"\"\"\n dvs_mgr = get_dv_switch_manager(vim)\n return vim.QueryDvsByUuid(dvs_mgr, uuid=uuid)\n",
"step-3": "<mask token>\n\n\ndef build_recursive_traversal_spec(client_factory):\n rp_to_rp = client_factory.create('ns0:TraversalSpec')\n rp_to_rp.name = 'rpToRp'\n rp_to_rp.type = 'ResourcePool'\n rp_to_rp.path = 'resourcePool'\n rp_to_rp.skip = False\n rp_to_vm = client_factory.create('ns0:TraversalSpec')\n rp_to_vm.name = 'rpToVm'\n rp_to_vm.type = 'ResourcePool'\n rp_to_vm.path = 'vm'\n rp_to_vm.skip = False\n spec_array_resource_pool = [client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec')]\n spec_array_resource_pool[0].name = 'rpToRp'\n spec_array_resource_pool[1].name = 'rpToVm'\n rp_to_rp.selectSet = spec_array_resource_pool\n cr_to_rp = client_factory.create('ns0:TraversalSpec')\n cr_to_rp.name = 'crToRp'\n cr_to_rp.type = 'ComputeResource'\n cr_to_rp.path = 'resourcePool'\n cr_to_rp.skip = False\n spec_array_compute_resource = [client_factory.create(\n 'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec')]\n spec_array_compute_resource[0].name = 'rpToRp'\n spec_array_compute_resource[1].name = 'rpToVm'\n cr_to_rp.selectSet = spec_array_compute_resource\n cr_to_h = client_factory.create('ns0:TraversalSpec')\n cr_to_h.name = 'crToH'\n cr_to_h.type = 'ComputeResource'\n cr_to_h.path = 'host'\n cr_to_h.skip = False\n dc_to_hf = client_factory.create('ns0:TraversalSpec')\n dc_to_hf.name = 'dcToHf'\n dc_to_hf.type = 'Datacenter'\n dc_to_hf.path = 'hostFolder'\n dc_to_hf.skip = False\n spec_array_datacenter_host = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datacenter_host[0].name = 'visitFolders'\n dc_to_hf.selectSet = spec_array_datacenter_host\n dc_to_vmf = client_factory.create('ns0:TraversalSpec')\n dc_to_vmf.name = 'dcToVmf'\n dc_to_vmf.type = 'Datacenter'\n dc_to_vmf.path = 'vmFolder'\n dc_to_vmf.skip = False\n spec_array_datacenter_vm = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datacenter_vm[0].name = 'visitFolders'\n dc_to_vmf.selectSet = spec_array_datacenter_vm\n dc_to_ds = client_factory.create('ns0:TraversalSpec')\n dc_to_ds.name = 'dcToDs'\n dc_to_ds.type = 'Datacenter'\n dc_to_ds.path = 'datastore'\n dc_to_ds.skip = False\n spec_array_datacenter_ds = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datacenter_ds[0].name = 'visitFolders'\n dc_to_ds.selectSet = spec_array_datacenter_ds\n h_to_vm = client_factory.create('ns0:TraversalSpec')\n h_to_vm.name = 'hToVm'\n h_to_vm.type = 'HostSystem'\n h_to_vm.path = 'vm'\n h_to_vm.skip = False\n spec_array_host_vm = [client_factory.create('ns0:SelectionSpec')]\n spec_array_host_vm[0].name = 'visitFolders'\n h_to_vm.selectSet = spec_array_host_vm\n ds_to_vm = client_factory.create('ns0:TraversalSpec')\n ds_to_vm.name = 'dsToVm'\n ds_to_vm.type = 'Datastore'\n ds_to_vm.path = 'vm'\n ds_to_vm.skip = False\n spec_array_datastore_vm = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datastore_vm[0].name = 'visitFolders'\n ds_to_vm.selectSet = spec_array_datastore_vm\n visit_folders = client_factory.create('ns0:TraversalSpec')\n visit_folders.name = 'visitFolders'\n visit_folders.type = 'Folder'\n visit_folders.path = 'childEntity'\n visit_folders.skip = False\n spec_array_visit_folders = [client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec'), client_factory.create(\n 'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec'), client_factory.create(\n 'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec'),\n 
client_factory.create('ns0:SelectionSpec'), client_factory.create(\n 'ns0:SelectionSpec')]\n spec_array_visit_folders[0].name = 'visitFolders'\n spec_array_visit_folders[1].name = 'dcToHf'\n spec_array_visit_folders[2].name = 'dcToVmf'\n spec_array_visit_folders[3].name = 'crToH'\n spec_array_visit_folders[4].name = 'crToRp'\n spec_array_visit_folders[5].name = 'dcToDs'\n spec_array_visit_folders[6].name = 'hToVm'\n spec_array_visit_folders[7].name = 'dsToVm'\n spec_array_visit_folders[8].name = 'rpToVm'\n visit_folders.selectSet = spec_array_visit_folders\n spec_array = [visit_folders, dc_to_vmf, dc_to_ds, dc_to_hf, cr_to_h,\n cr_to_rp, rp_to_rp, h_to_vm, ds_to_vm, rp_to_vm]\n return spec_array\n\n\ndef get_object_properties(vim, collector, mobj, type, properties):\n \"\"\"Gets the properties of the Managed object specified.\"\"\"\n client_factory = vim.client.factory\n if mobj is None:\n return None\n usecoll = collector\n if usecoll is None:\n usecoll = vim.service_content.propertyCollector\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n property_spec = client_factory.create('ns0:PropertySpec')\n property_spec.all = properties is None or len(properties) == 0\n property_spec.pathSet = properties\n property_spec.type = type\n object_spec = client_factory.create('ns0:ObjectSpec')\n object_spec.obj = mobj\n object_spec.skip = False\n property_filter_spec.propSet = [property_spec]\n property_filter_spec.objectSet = [object_spec]\n return retrieve_properties_ex(vim, usecoll, [property_filter_spec])\n\n\ndef get_dynamic_property(vim, mobj, type, property_name):\n \"\"\"Gets a particular property of the Managed Object.\"\"\"\n properties = get_dynamic_properties(vim, mobj, [property_name], type)\n property_value = None\n if property_name in properties:\n property_value = properties.get(property_name)\n return property_value\n\n\n<mask token>\n\n\ndef retrieve_properties_ex(vim, prop_coll, spec_set, max_count=500):\n \"\"\"Retrieve properties.\n\n Retrieve properties using PropertyCollector.RetrievePropertiesEx\n and PropertyCollector.ContinueRetrievePropertiesEx\n args:\n :param vim: Vim object\n :param prop_coll: PropertyCollector MOR\n :param max_count: Max num of objects returned in a single call.\n \"\"\"\n objcont = []\n client_factory = vim.client.factory\n opts = client_factory.create('ns0:RetrieveOptions')\n opts.maxObjects = max_count\n res = vim.RetrievePropertiesEx(prop_coll, specSet=spec_set, options=opts)\n while True:\n if res and res.objects:\n objcont.extend(res.objects)\n if hasattr(res, 'token') and res.token:\n res = vim.ContinueRetrievePropertiesEx(prop_coll, token=res.token)\n else:\n break\n return objcont\n\n\ndef get_objects(vim, type, properties_to_collect=None, all=False):\n \"\"\"Gets the list of objects of the type specified.\"\"\"\n if not properties_to_collect:\n properties_to_collect = ['name']\n client_factory = vim.client.factory\n trav_spec = vim_util.build_recursive_traversal_spec(client_factory)\n object_spec = vim_util.build_object_spec(client_factory, vim.\n service_content.rootFolder, [trav_spec])\n property_spec = vim_util.build_property_spec(client_factory, type_=type,\n properties_to_collect=properties_to_collect, all_properties=all)\n property_filter_spec = vim_util.build_property_filter_spec(client_factory,\n [property_spec], [object_spec])\n property_collector = vim.service_content.propertyCollector\n return retrieve_properties_ex(vim, property_collector, [\n property_filter_spec])\n\n\ndef get_prop_spec(client_factory, 
spec_type, properties):\n \"\"\"Builds the Property Spec Object.\"\"\"\n prop_spec = client_factory.create('ns0:PropertySpec')\n prop_spec.type = spec_type\n prop_spec.pathSet = properties\n return prop_spec\n\n\ndef get_obj_spec(client_factory, obj, select_set=None):\n \"\"\"Builds the Object Spec object.\"\"\"\n obj_spec = client_factory.create('ns0:ObjectSpec')\n obj_spec.obj = obj\n obj_spec.skip = False\n if select_set is not None:\n obj_spec.selectSet = select_set\n return obj_spec\n\n\ndef get_prop_filter_spec(client_factory, obj_spec, prop_spec):\n \"\"\"Builds the Property Filter Spec Object.\"\"\"\n prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n prop_filter_spec.propSet = prop_spec\n prop_filter_spec.objectSet = obj_spec\n return prop_filter_spec\n\n\ndef get_property_filter_specs(vim, property_dict, objects=None):\n client_factory = vim.client.factory\n object_specs = []\n if not objects:\n objects = [vim.service_content.rootFolder]\n for obj in objects:\n if obj.value == get_root_folder_id(vim):\n traversal_spec = [vim_util.build_recursive_traversal_spec(\n client_factory)]\n else:\n traversal_spec = build_recursive_traversal_spec(client_factory)\n object_spec = vim_util.build_object_spec(client_factory, obj,\n traversal_spec)\n object_specs.append(object_spec)\n property_specs = []\n for obj_type in property_dict:\n props = property_dict[obj_type]\n property_spec = vim_util.build_property_spec(client_factory, type_=\n obj_type, properties_to_collect=props)\n property_specs.append(property_spec)\n property_filter_spec = vim_util.build_property_filter_spec(client_factory,\n property_specs, object_specs)\n return property_filter_spec\n\n\ndef create_filter(vim, prop_filter_spec, collector=None):\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.CreateFilter(collector, spec=prop_filter_spec,\n partialUpdates=False)\n\n\ndef create_property_collector(vim, collector=None):\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.CreatePropertyCollector(collector)\n\n\ndef destroy_property_collector(vim, collector):\n if collector:\n return vim.DestroyPropertyCollector(collector)\n\n\ndef wait_for_updates_ex(vim, version, collector=None, max_wait=85,\n max_update_count=-1):\n \"\"\"Polling mechanism for property collection\n\n args:\n :param vim: Vim object\n :param version: version string\n :param collector: PropertyCollector MOR\n :param max_wait: Max time in seconds before the call returns\n (Default set to 85 as 90 is the http socket timeout)\n :param max_update_count: Max num of ObjectUpdates returned\n in a single call. 
Not set if <= 0\n \"\"\"\n client_factory = vim.client.factory\n waitopts = client_factory.create('ns0:WaitOptions')\n waitopts.maxWaitSeconds = max_wait\n if max_update_count > 0:\n waitopts.maxObjectUpdates = max_update_count\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.WaitForUpdatesEx(collector, version=version, options=waitopts)\n\n\ndef cancel_wait_for_updates(vim, collector=None):\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.CancelWaitForUpdates(collector)\n\n\ndef get_properties_for_a_collection_of_objects(vim, type, obj_list, properties\n ):\n \"\"\"Gets the list of properties for the collection of objects.\"\"\"\n client_factory = vim.client.factory\n if len(obj_list) == 0:\n return []\n prop_spec = get_prop_spec(client_factory, type, properties)\n lst_obj_specs = []\n for obj in obj_list:\n lst_obj_specs.append(get_obj_spec(client_factory, obj))\n prop_filter_spec = get_prop_filter_spec(client_factory, lst_obj_specs,\n [prop_spec])\n return retrieve_properties_ex(vim, vim.service_content.\n propertyCollector, [prop_filter_spec])\n\n\ndef get_search_index(vim):\n return vim.service_content.searchIndex\n\n\ndef find_by_inventory_path(vim, search_index, path):\n return vim.FindByInventoryPath(search_index, inventoryPath=path)\n\n\ndef get_root_folder_id(vim):\n return vim.service_content.rootFolder.value\n\n\ndef get_dv_switch_manager(vim):\n \"\"\"Get reference of DistributedVirtualSwitchManager.\"\"\"\n return vim.service_content.dvSwitchManager\n\n\ndef get_dvs_mor_by_uuid(vim, uuid):\n \"\"\"Query DVS by UUID.\"\"\"\n dvs_mgr = get_dv_switch_manager(vim)\n return vim.QueryDvsByUuid(dvs_mgr, uuid=uuid)\n",
"step-4": "from oslo_vmware import vim_util\n\n\ndef build_recursive_traversal_spec(client_factory):\n rp_to_rp = client_factory.create('ns0:TraversalSpec')\n rp_to_rp.name = 'rpToRp'\n rp_to_rp.type = 'ResourcePool'\n rp_to_rp.path = 'resourcePool'\n rp_to_rp.skip = False\n rp_to_vm = client_factory.create('ns0:TraversalSpec')\n rp_to_vm.name = 'rpToVm'\n rp_to_vm.type = 'ResourcePool'\n rp_to_vm.path = 'vm'\n rp_to_vm.skip = False\n spec_array_resource_pool = [client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec')]\n spec_array_resource_pool[0].name = 'rpToRp'\n spec_array_resource_pool[1].name = 'rpToVm'\n rp_to_rp.selectSet = spec_array_resource_pool\n cr_to_rp = client_factory.create('ns0:TraversalSpec')\n cr_to_rp.name = 'crToRp'\n cr_to_rp.type = 'ComputeResource'\n cr_to_rp.path = 'resourcePool'\n cr_to_rp.skip = False\n spec_array_compute_resource = [client_factory.create(\n 'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec')]\n spec_array_compute_resource[0].name = 'rpToRp'\n spec_array_compute_resource[1].name = 'rpToVm'\n cr_to_rp.selectSet = spec_array_compute_resource\n cr_to_h = client_factory.create('ns0:TraversalSpec')\n cr_to_h.name = 'crToH'\n cr_to_h.type = 'ComputeResource'\n cr_to_h.path = 'host'\n cr_to_h.skip = False\n dc_to_hf = client_factory.create('ns0:TraversalSpec')\n dc_to_hf.name = 'dcToHf'\n dc_to_hf.type = 'Datacenter'\n dc_to_hf.path = 'hostFolder'\n dc_to_hf.skip = False\n spec_array_datacenter_host = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datacenter_host[0].name = 'visitFolders'\n dc_to_hf.selectSet = spec_array_datacenter_host\n dc_to_vmf = client_factory.create('ns0:TraversalSpec')\n dc_to_vmf.name = 'dcToVmf'\n dc_to_vmf.type = 'Datacenter'\n dc_to_vmf.path = 'vmFolder'\n dc_to_vmf.skip = False\n spec_array_datacenter_vm = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datacenter_vm[0].name = 'visitFolders'\n dc_to_vmf.selectSet = spec_array_datacenter_vm\n dc_to_ds = client_factory.create('ns0:TraversalSpec')\n dc_to_ds.name = 'dcToDs'\n dc_to_ds.type = 'Datacenter'\n dc_to_ds.path = 'datastore'\n dc_to_ds.skip = False\n spec_array_datacenter_ds = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datacenter_ds[0].name = 'visitFolders'\n dc_to_ds.selectSet = spec_array_datacenter_ds\n h_to_vm = client_factory.create('ns0:TraversalSpec')\n h_to_vm.name = 'hToVm'\n h_to_vm.type = 'HostSystem'\n h_to_vm.path = 'vm'\n h_to_vm.skip = False\n spec_array_host_vm = [client_factory.create('ns0:SelectionSpec')]\n spec_array_host_vm[0].name = 'visitFolders'\n h_to_vm.selectSet = spec_array_host_vm\n ds_to_vm = client_factory.create('ns0:TraversalSpec')\n ds_to_vm.name = 'dsToVm'\n ds_to_vm.type = 'Datastore'\n ds_to_vm.path = 'vm'\n ds_to_vm.skip = False\n spec_array_datastore_vm = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datastore_vm[0].name = 'visitFolders'\n ds_to_vm.selectSet = spec_array_datastore_vm\n visit_folders = client_factory.create('ns0:TraversalSpec')\n visit_folders.name = 'visitFolders'\n visit_folders.type = 'Folder'\n visit_folders.path = 'childEntity'\n visit_folders.skip = False\n spec_array_visit_folders = [client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec'), client_factory.create(\n 'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec'), client_factory.create(\n 'ns0:SelectionSpec'), client_factory.create('ns0:SelectionSpec'),\n 
client_factory.create('ns0:SelectionSpec'), client_factory.create(\n 'ns0:SelectionSpec')]\n spec_array_visit_folders[0].name = 'visitFolders'\n spec_array_visit_folders[1].name = 'dcToHf'\n spec_array_visit_folders[2].name = 'dcToVmf'\n spec_array_visit_folders[3].name = 'crToH'\n spec_array_visit_folders[4].name = 'crToRp'\n spec_array_visit_folders[5].name = 'dcToDs'\n spec_array_visit_folders[6].name = 'hToVm'\n spec_array_visit_folders[7].name = 'dsToVm'\n spec_array_visit_folders[8].name = 'rpToVm'\n visit_folders.selectSet = spec_array_visit_folders\n spec_array = [visit_folders, dc_to_vmf, dc_to_ds, dc_to_hf, cr_to_h,\n cr_to_rp, rp_to_rp, h_to_vm, ds_to_vm, rp_to_vm]\n return spec_array\n\n\ndef get_object_properties(vim, collector, mobj, type, properties):\n \"\"\"Gets the properties of the Managed object specified.\"\"\"\n client_factory = vim.client.factory\n if mobj is None:\n return None\n usecoll = collector\n if usecoll is None:\n usecoll = vim.service_content.propertyCollector\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n property_spec = client_factory.create('ns0:PropertySpec')\n property_spec.all = properties is None or len(properties) == 0\n property_spec.pathSet = properties\n property_spec.type = type\n object_spec = client_factory.create('ns0:ObjectSpec')\n object_spec.obj = mobj\n object_spec.skip = False\n property_filter_spec.propSet = [property_spec]\n property_filter_spec.objectSet = [object_spec]\n return retrieve_properties_ex(vim, usecoll, [property_filter_spec])\n\n\ndef get_dynamic_property(vim, mobj, type, property_name):\n \"\"\"Gets a particular property of the Managed Object.\"\"\"\n properties = get_dynamic_properties(vim, mobj, [property_name], type)\n property_value = None\n if property_name in properties:\n property_value = properties.get(property_name)\n return property_value\n\n\ndef get_dynamic_properties(vim, mobj, property_names, obj_type=None):\n \"\"\"Gets specific properties of the Managed Object.\"\"\"\n if not obj_type:\n obj_type = mobj._type\n obj_content = get_object_properties(vim, None, mobj, obj_type,\n property_names)\n properties = {}\n if obj_content:\n dynamic_properties = obj_content[0].propSet\n for dynamic_property in dynamic_properties:\n property_name = dynamic_property.name\n property_value = dynamic_property.val\n properties[property_name] = property_value\n return properties\n\n\ndef retrieve_properties_ex(vim, prop_coll, spec_set, max_count=500):\n \"\"\"Retrieve properties.\n\n Retrieve properties using PropertyCollector.RetrievePropertiesEx\n and PropertyCollector.ContinueRetrievePropertiesEx\n args:\n :param vim: Vim object\n :param prop_coll: PropertyCollector MOR\n :param max_count: Max num of objects returned in a single call.\n \"\"\"\n objcont = []\n client_factory = vim.client.factory\n opts = client_factory.create('ns0:RetrieveOptions')\n opts.maxObjects = max_count\n res = vim.RetrievePropertiesEx(prop_coll, specSet=spec_set, options=opts)\n while True:\n if res and res.objects:\n objcont.extend(res.objects)\n if hasattr(res, 'token') and res.token:\n res = vim.ContinueRetrievePropertiesEx(prop_coll, token=res.token)\n else:\n break\n return objcont\n\n\ndef get_objects(vim, type, properties_to_collect=None, all=False):\n \"\"\"Gets the list of objects of the type specified.\"\"\"\n if not properties_to_collect:\n properties_to_collect = ['name']\n client_factory = vim.client.factory\n trav_spec = vim_util.build_recursive_traversal_spec(client_factory)\n object_spec = 
vim_util.build_object_spec(client_factory, vim.\n service_content.rootFolder, [trav_spec])\n property_spec = vim_util.build_property_spec(client_factory, type_=type,\n properties_to_collect=properties_to_collect, all_properties=all)\n property_filter_spec = vim_util.build_property_filter_spec(client_factory,\n [property_spec], [object_spec])\n property_collector = vim.service_content.propertyCollector\n return retrieve_properties_ex(vim, property_collector, [\n property_filter_spec])\n\n\ndef get_prop_spec(client_factory, spec_type, properties):\n \"\"\"Builds the Property Spec Object.\"\"\"\n prop_spec = client_factory.create('ns0:PropertySpec')\n prop_spec.type = spec_type\n prop_spec.pathSet = properties\n return prop_spec\n\n\ndef get_obj_spec(client_factory, obj, select_set=None):\n \"\"\"Builds the Object Spec object.\"\"\"\n obj_spec = client_factory.create('ns0:ObjectSpec')\n obj_spec.obj = obj\n obj_spec.skip = False\n if select_set is not None:\n obj_spec.selectSet = select_set\n return obj_spec\n\n\ndef get_prop_filter_spec(client_factory, obj_spec, prop_spec):\n \"\"\"Builds the Property Filter Spec Object.\"\"\"\n prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n prop_filter_spec.propSet = prop_spec\n prop_filter_spec.objectSet = obj_spec\n return prop_filter_spec\n\n\ndef get_property_filter_specs(vim, property_dict, objects=None):\n client_factory = vim.client.factory\n object_specs = []\n if not objects:\n objects = [vim.service_content.rootFolder]\n for obj in objects:\n if obj.value == get_root_folder_id(vim):\n traversal_spec = [vim_util.build_recursive_traversal_spec(\n client_factory)]\n else:\n traversal_spec = build_recursive_traversal_spec(client_factory)\n object_spec = vim_util.build_object_spec(client_factory, obj,\n traversal_spec)\n object_specs.append(object_spec)\n property_specs = []\n for obj_type in property_dict:\n props = property_dict[obj_type]\n property_spec = vim_util.build_property_spec(client_factory, type_=\n obj_type, properties_to_collect=props)\n property_specs.append(property_spec)\n property_filter_spec = vim_util.build_property_filter_spec(client_factory,\n property_specs, object_specs)\n return property_filter_spec\n\n\ndef create_filter(vim, prop_filter_spec, collector=None):\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.CreateFilter(collector, spec=prop_filter_spec,\n partialUpdates=False)\n\n\ndef create_property_collector(vim, collector=None):\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.CreatePropertyCollector(collector)\n\n\ndef destroy_property_collector(vim, collector):\n if collector:\n return vim.DestroyPropertyCollector(collector)\n\n\ndef wait_for_updates_ex(vim, version, collector=None, max_wait=85,\n max_update_count=-1):\n \"\"\"Polling mechanism for property collection\n\n args:\n :param vim: Vim object\n :param version: version string\n :param collector: PropertyCollector MOR\n :param max_wait: Max time in seconds before the call returns\n (Default set to 85 as 90 is the http socket timeout)\n :param max_update_count: Max num of ObjectUpdates returned\n in a single call. 
Not set if <= 0\n \"\"\"\n client_factory = vim.client.factory\n waitopts = client_factory.create('ns0:WaitOptions')\n waitopts.maxWaitSeconds = max_wait\n if max_update_count > 0:\n waitopts.maxObjectUpdates = max_update_count\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.WaitForUpdatesEx(collector, version=version, options=waitopts)\n\n\ndef cancel_wait_for_updates(vim, collector=None):\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.CancelWaitForUpdates(collector)\n\n\ndef get_properties_for_a_collection_of_objects(vim, type, obj_list, properties\n ):\n \"\"\"Gets the list of properties for the collection of objects.\"\"\"\n client_factory = vim.client.factory\n if len(obj_list) == 0:\n return []\n prop_spec = get_prop_spec(client_factory, type, properties)\n lst_obj_specs = []\n for obj in obj_list:\n lst_obj_specs.append(get_obj_spec(client_factory, obj))\n prop_filter_spec = get_prop_filter_spec(client_factory, lst_obj_specs,\n [prop_spec])\n return retrieve_properties_ex(vim, vim.service_content.\n propertyCollector, [prop_filter_spec])\n\n\ndef get_search_index(vim):\n return vim.service_content.searchIndex\n\n\ndef find_by_inventory_path(vim, search_index, path):\n return vim.FindByInventoryPath(search_index, inventoryPath=path)\n\n\ndef get_root_folder_id(vim):\n return vim.service_content.rootFolder.value\n\n\ndef get_dv_switch_manager(vim):\n \"\"\"Get reference of DistributedVirtualSwitchManager.\"\"\"\n return vim.service_content.dvSwitchManager\n\n\ndef get_dvs_mor_by_uuid(vim, uuid):\n \"\"\"Query DVS by UUID.\"\"\"\n dvs_mgr = get_dv_switch_manager(vim)\n return vim.QueryDvsByUuid(dvs_mgr, uuid=uuid)\n",
"step-5": "# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_vmware import vim_util\n\n\ndef build_recursive_traversal_spec(client_factory):\n # Recurse through all ResourcePools\n rp_to_rp = client_factory.create('ns0:TraversalSpec')\n rp_to_rp.name = 'rpToRp'\n rp_to_rp.type = 'ResourcePool'\n rp_to_rp.path = 'resourcePool'\n rp_to_rp.skip = False\n rp_to_vm = client_factory.create('ns0:TraversalSpec')\n rp_to_vm.name = 'rpToVm'\n rp_to_vm.type = 'ResourcePool'\n rp_to_vm.path = 'vm'\n rp_to_vm.skip = False\n spec_array_resource_pool = [client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec')]\n spec_array_resource_pool[0].name = 'rpToRp'\n spec_array_resource_pool[1].name = 'rpToVm'\n rp_to_rp.selectSet = spec_array_resource_pool\n\n # Traversal through resource pool branch\n cr_to_rp = client_factory.create('ns0:TraversalSpec')\n cr_to_rp.name = 'crToRp'\n cr_to_rp.type = 'ComputeResource'\n cr_to_rp.path = 'resourcePool'\n cr_to_rp.skip = False\n spec_array_compute_resource = [client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec')]\n spec_array_compute_resource[0].name = 'rpToRp'\n spec_array_compute_resource[1].name = 'rpToVm'\n cr_to_rp.selectSet = spec_array_compute_resource\n\n # Traversal through host branch\n cr_to_h = client_factory.create('ns0:TraversalSpec')\n cr_to_h.name = 'crToH'\n cr_to_h.type = 'ComputeResource'\n cr_to_h.path = 'host'\n cr_to_h.skip = False\n\n # Traversal through hostFolder branch\n dc_to_hf = client_factory.create('ns0:TraversalSpec')\n dc_to_hf.name = 'dcToHf'\n dc_to_hf.type = 'Datacenter'\n dc_to_hf.path = 'hostFolder'\n dc_to_hf.skip = False\n spec_array_datacenter_host = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datacenter_host[0].name = 'visitFolders'\n dc_to_hf.selectSet = spec_array_datacenter_host\n\n # Traversal through vmFolder branch\n dc_to_vmf = client_factory.create('ns0:TraversalSpec')\n dc_to_vmf.name = 'dcToVmf'\n dc_to_vmf.type = 'Datacenter'\n dc_to_vmf.path = 'vmFolder'\n dc_to_vmf.skip = False\n spec_array_datacenter_vm = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datacenter_vm[0].name = 'visitFolders'\n dc_to_vmf.selectSet = spec_array_datacenter_vm\n\n # Traversal through datastore branch\n dc_to_ds = client_factory.create('ns0:TraversalSpec')\n dc_to_ds.name = 'dcToDs'\n dc_to_ds.type = 'Datacenter'\n dc_to_ds.path = 'datastore'\n dc_to_ds.skip = False\n spec_array_datacenter_ds = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datacenter_ds[0].name = 'visitFolders'\n dc_to_ds.selectSet = spec_array_datacenter_ds\n\n # Recurse through all hosts\n h_to_vm = client_factory.create('ns0:TraversalSpec')\n h_to_vm.name = 'hToVm'\n h_to_vm.type = 'HostSystem'\n h_to_vm.path = 'vm'\n h_to_vm.skip = False\n spec_array_host_vm = [client_factory.create('ns0:SelectionSpec')]\n spec_array_host_vm[0].name = 'visitFolders'\n 
h_to_vm.selectSet = spec_array_host_vm\n\n # Recurse through all datastores\n ds_to_vm = client_factory.create('ns0:TraversalSpec')\n ds_to_vm.name = 'dsToVm'\n ds_to_vm.type = 'Datastore'\n ds_to_vm.path = 'vm'\n ds_to_vm.skip = False\n spec_array_datastore_vm = [client_factory.create('ns0:SelectionSpec')]\n spec_array_datastore_vm[0].name = 'visitFolders'\n ds_to_vm.selectSet = spec_array_datastore_vm\n\n # Recurse through the folders\n visit_folders = client_factory.create('ns0:TraversalSpec')\n visit_folders.name = 'visitFolders'\n visit_folders.type = 'Folder'\n visit_folders.path = 'childEntity'\n visit_folders.skip = False\n spec_array_visit_folders = [client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec'),\n client_factory.create('ns0:SelectionSpec')]\n spec_array_visit_folders[0].name = 'visitFolders'\n spec_array_visit_folders[1].name = 'dcToHf'\n spec_array_visit_folders[2].name = 'dcToVmf'\n spec_array_visit_folders[3].name = 'crToH'\n spec_array_visit_folders[4].name = 'crToRp'\n spec_array_visit_folders[5].name = 'dcToDs'\n spec_array_visit_folders[6].name = 'hToVm'\n spec_array_visit_folders[7].name = 'dsToVm'\n spec_array_visit_folders[8].name = 'rpToVm'\n visit_folders.selectSet = spec_array_visit_folders\n\n # Add all of them here\n spec_array = [visit_folders, dc_to_vmf, dc_to_ds, dc_to_hf, cr_to_h,\n cr_to_rp, rp_to_rp, h_to_vm, ds_to_vm, rp_to_vm]\n return spec_array\n\n\ndef get_object_properties(vim, collector, mobj, type, properties):\n \"\"\"Gets the properties of the Managed object specified.\"\"\"\n client_factory = vim.client.factory\n if mobj is None:\n return None\n usecoll = collector\n if usecoll is None:\n usecoll = vim.service_content.propertyCollector\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n property_spec = client_factory.create('ns0:PropertySpec')\n property_spec.all = (properties is None or len(properties) == 0)\n property_spec.pathSet = properties\n property_spec.type = type\n object_spec = client_factory.create('ns0:ObjectSpec')\n object_spec.obj = mobj\n object_spec.skip = False\n property_filter_spec.propSet = [property_spec]\n property_filter_spec.objectSet = [object_spec]\n return retrieve_properties_ex(vim,\n usecoll,\n [property_filter_spec])\n\n\ndef get_dynamic_property(vim, mobj, type, property_name):\n \"\"\"Gets a particular property of the Managed Object.\"\"\"\n properties = get_dynamic_properties(vim, mobj, [property_name], type)\n property_value = None\n if property_name in properties:\n property_value = properties.get(property_name)\n return property_value\n\n\ndef get_dynamic_properties(vim, mobj, property_names, obj_type=None):\n \"\"\"Gets specific properties of the Managed Object.\"\"\"\n if not obj_type:\n obj_type = mobj._type\n obj_content = get_object_properties(\n vim, None, mobj, obj_type, property_names)\n properties = {}\n if obj_content:\n dynamic_properties = obj_content[0].propSet\n for dynamic_property in dynamic_properties:\n property_name = dynamic_property.name\n property_value = dynamic_property.val\n properties[property_name] = property_value\n return properties\n\n\ndef retrieve_properties_ex(vim, prop_coll, spec_set, max_count=500):\n \"\"\"Retrieve properties.\n\n Retrieve 
properties using PropertyCollector.RetrievePropertiesEx\n and PropertyCollector.ContinueRetrievePropertiesEx\n args:\n :param vim: Vim object\n :param prop_coll: PropertyCollector MOR\n :param max_count: Max num of objects returned in a single call.\n \"\"\"\n objcont = []\n client_factory = vim.client.factory\n opts = client_factory.create('ns0:RetrieveOptions')\n opts.maxObjects = max_count\n res = vim.RetrievePropertiesEx(prop_coll,\n specSet=spec_set,\n options=opts)\n while True:\n if res and res.objects:\n objcont.extend(res.objects)\n if hasattr(res, \"token\") and res.token:\n res = vim.ContinueRetrievePropertiesEx(prop_coll, token=res.token)\n else:\n break\n return objcont\n\n\ndef get_objects(vim, type, properties_to_collect=None, all=False):\n \"\"\"Gets the list of objects of the type specified.\"\"\"\n if not properties_to_collect:\n properties_to_collect = [\"name\"]\n\n client_factory = vim.client.factory\n trav_spec = vim_util.build_recursive_traversal_spec(client_factory)\n object_spec = vim_util.build_object_spec(client_factory,\n vim.service_content.rootFolder,\n [trav_spec])\n property_spec = vim_util.build_property_spec(\n client_factory, type_=type,\n properties_to_collect=properties_to_collect,\n all_properties=all)\n property_filter_spec = vim_util.build_property_filter_spec(client_factory,\n [property_spec],\n [object_spec])\n property_collector = vim.service_content.propertyCollector\n return retrieve_properties_ex(vim,\n property_collector,\n [property_filter_spec])\n\n\ndef get_prop_spec(client_factory, spec_type, properties):\n \"\"\"Builds the Property Spec Object.\"\"\"\n prop_spec = client_factory.create('ns0:PropertySpec')\n prop_spec.type = spec_type\n prop_spec.pathSet = properties\n return prop_spec\n\n\ndef get_obj_spec(client_factory, obj, select_set=None):\n \"\"\"Builds the Object Spec object.\"\"\"\n obj_spec = client_factory.create('ns0:ObjectSpec')\n obj_spec.obj = obj\n obj_spec.skip = False\n if select_set is not None:\n obj_spec.selectSet = select_set\n return obj_spec\n\n\ndef get_prop_filter_spec(client_factory, obj_spec, prop_spec):\n \"\"\"Builds the Property Filter Spec Object.\"\"\"\n prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n prop_filter_spec.propSet = prop_spec\n prop_filter_spec.objectSet = obj_spec\n return prop_filter_spec\n\n\ndef get_property_filter_specs(vim, property_dict, objects=None):\n client_factory = vim.client.factory\n object_specs = []\n if not objects:\n objects = [vim.service_content.rootFolder]\n for obj in objects:\n if obj.value == get_root_folder_id(vim):\n traversal_spec = [\n vim_util.build_recursive_traversal_spec(client_factory)]\n else:\n traversal_spec = build_recursive_traversal_spec(client_factory)\n object_spec = vim_util.build_object_spec(client_factory,\n obj,\n traversal_spec)\n object_specs.append(object_spec)\n\n property_specs = []\n for obj_type in property_dict:\n props = property_dict[obj_type]\n property_spec = vim_util.build_property_spec(\n client_factory, type_=obj_type, properties_to_collect=props)\n property_specs.append(property_spec)\n\n property_filter_spec = vim_util.build_property_filter_spec(client_factory,\n property_specs,\n object_specs)\n return property_filter_spec\n\n\ndef create_filter(vim, prop_filter_spec, collector=None):\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.CreateFilter(collector,\n spec=prop_filter_spec,\n partialUpdates=False)\n\n\ndef create_property_collector(vim, collector=None):\n if not 
collector:\n collector = vim.service_content.propertyCollector\n return vim.CreatePropertyCollector(collector)\n\n\ndef destroy_property_collector(vim, collector):\n if collector:\n return vim.DestroyPropertyCollector(collector)\n\n\ndef wait_for_updates_ex(vim, version, collector=None,\n max_wait=85, max_update_count=-1):\n \"\"\"Polling mechanism for property collection\n\n args:\n :param vim: Vim object\n :param version: version string\n :param collector: PropertyCollector MOR\n :param max_wait: Max time in seconds before the call returns\n (Default set to 85 as 90 is the http socket timeout)\n :param max_update_count: Max num of ObjectUpdates returned\n in a single call. Not set if <= 0\n \"\"\"\n client_factory = vim.client.factory\n waitopts = client_factory.create('ns0:WaitOptions')\n waitopts.maxWaitSeconds = max_wait\n if max_update_count > 0:\n waitopts.maxObjectUpdates = max_update_count\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.WaitForUpdatesEx(collector,\n version=version,\n options=waitopts)\n\n\ndef cancel_wait_for_updates(vim, collector=None):\n if not collector:\n collector = vim.service_content.propertyCollector\n return vim.CancelWaitForUpdates(collector)\n\n\ndef get_properties_for_a_collection_of_objects(vim, type,\n obj_list, properties):\n \"\"\"Gets the list of properties for the collection of objects.\"\"\"\n client_factory = vim.client.factory\n if len(obj_list) == 0:\n return []\n prop_spec = get_prop_spec(client_factory, type, properties)\n lst_obj_specs = []\n for obj in obj_list:\n lst_obj_specs.append(get_obj_spec(client_factory, obj))\n prop_filter_spec = get_prop_filter_spec(client_factory,\n lst_obj_specs, [prop_spec])\n return retrieve_properties_ex(vim,\n vim.service_content.propertyCollector,\n [prop_filter_spec])\n\n\ndef get_search_index(vim):\n return vim.service_content.searchIndex\n\n\ndef find_by_inventory_path(vim, search_index, path):\n return vim.FindByInventoryPath(search_index, inventoryPath=path)\n\n\ndef get_root_folder_id(vim):\n return vim.service_content.rootFolder.value\n\n\ndef get_dv_switch_manager(vim):\n \"\"\"Get reference of DistributedVirtualSwitchManager.\"\"\"\n return vim.service_content.dvSwitchManager\n\n\ndef get_dvs_mor_by_uuid(vim, uuid):\n \"\"\"Query DVS by UUID.\"\"\"\n dvs_mgr = get_dv_switch_manager(vim)\n return vim.QueryDvsByUuid(dvs_mgr, uuid=uuid)\n",
"step-ids": [
14,
18,
20,
22,
23
]
}
|
[
14,
18,
20,
22,
23
] |
<|reserved_special_token_0|>
class CLI(object):
<|reserved_special_token_0|>
def hw2p2(self):
parser = argparse.ArgumentParser()
parser.add_argument('-s', type=str, default=None)
args = parser.parse_args(sys.argv[2:])
hw2p2.submit(args.s)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CLI(object):
def __init__(self):
parser = argparse.ArgumentParser(description=
'CMU Deep Learning Utilities')
parser.add_argument('command', help='Subcommand to run')
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print('Unrecognized command')
parser.print_help()
exit(1)
getattr(self, args.command)()
def hw2p2(self):
parser = argparse.ArgumentParser()
parser.add_argument('-s', type=str, default=None)
args = parser.parse_args(sys.argv[2:])
hw2p2.submit(args.s)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CLI(object):
def __init__(self):
parser = argparse.ArgumentParser(description=
'CMU Deep Learning Utilities')
parser.add_argument('command', help='Subcommand to run')
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print('Unrecognized command')
parser.print_help()
exit(1)
getattr(self, args.command)()
def hw2p2(self):
parser = argparse.ArgumentParser()
parser.add_argument('-s', type=str, default=None)
args = parser.parse_args(sys.argv[2:])
hw2p2.submit(args.s)
CLI()
<|reserved_special_token_1|>
import argparse
import sys
import os
import cmudl.hw2p2 as hw2p2
class CLI(object):
def __init__(self):
parser = argparse.ArgumentParser(description=
'CMU Deep Learning Utilities')
parser.add_argument('command', help='Subcommand to run')
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print('Unrecognized command')
parser.print_help()
exit(1)
getattr(self, args.command)()
def hw2p2(self):
parser = argparse.ArgumentParser()
parser.add_argument('-s', type=str, default=None)
args = parser.parse_args(sys.argv[2:])
hw2p2.submit(args.s)
CLI()
<|reserved_special_token_1|>
#!/usr/bin/env python
import argparse
import sys
import os
import cmudl.hw2p2 as hw2p2
class CLI(object):
def __init__(self):
parser = argparse.ArgumentParser(
description='CMU Deep Learning Utilities',
)
parser.add_argument('command', help='Subcommand to run')
# parse_args defaults to [1:] for args, but you need to
# exclude the rest of the args too, or validation will fail
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print('Unrecognized command')
parser.print_help()
exit(1)
# use dispatch pattern to invoke method with same name
getattr(self, args.command)()
def hw2p2(self):
parser = argparse.ArgumentParser()
parser.add_argument('-s', type=str, default=None)
args = parser.parse_args(sys.argv[2:])
hw2p2.submit(args.s)
CLI()
|
flexible
|
{
"blob_id": "0f74e0f0600c373c3ddd470f18dbb86cf213fb58",
"index": 9257,
"step-1": "<mask token>\n\n\nclass CLI(object):\n <mask token>\n\n def hw2p2(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', type=str, default=None)\n args = parser.parse_args(sys.argv[2:])\n hw2p2.submit(args.s)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CLI(object):\n\n def __init__(self):\n parser = argparse.ArgumentParser(description=\n 'CMU Deep Learning Utilities')\n parser.add_argument('command', help='Subcommand to run')\n args = parser.parse_args(sys.argv[1:2])\n if not hasattr(self, args.command):\n print('Unrecognized command')\n parser.print_help()\n exit(1)\n getattr(self, args.command)()\n\n def hw2p2(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', type=str, default=None)\n args = parser.parse_args(sys.argv[2:])\n hw2p2.submit(args.s)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CLI(object):\n\n def __init__(self):\n parser = argparse.ArgumentParser(description=\n 'CMU Deep Learning Utilities')\n parser.add_argument('command', help='Subcommand to run')\n args = parser.parse_args(sys.argv[1:2])\n if not hasattr(self, args.command):\n print('Unrecognized command')\n parser.print_help()\n exit(1)\n getattr(self, args.command)()\n\n def hw2p2(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', type=str, default=None)\n args = parser.parse_args(sys.argv[2:])\n hw2p2.submit(args.s)\n\n\nCLI()\n",
"step-4": "import argparse\nimport sys\nimport os\nimport cmudl.hw2p2 as hw2p2\n\n\nclass CLI(object):\n\n def __init__(self):\n parser = argparse.ArgumentParser(description=\n 'CMU Deep Learning Utilities')\n parser.add_argument('command', help='Subcommand to run')\n args = parser.parse_args(sys.argv[1:2])\n if not hasattr(self, args.command):\n print('Unrecognized command')\n parser.print_help()\n exit(1)\n getattr(self, args.command)()\n\n def hw2p2(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', type=str, default=None)\n args = parser.parse_args(sys.argv[2:])\n hw2p2.submit(args.s)\n\n\nCLI()\n",
"step-5": "#!/usr/bin/env python\n\nimport argparse\nimport sys\nimport os\nimport cmudl.hw2p2 as hw2p2\n\nclass CLI(object):\n\n def __init__(self):\n parser = argparse.ArgumentParser(\n description='CMU Deep Learning Utilities',\n )\n parser.add_argument('command', help='Subcommand to run')\n # parse_args defaults to [1:] for args, but you need to\n # exclude the rest of the args too, or validation will fail\n args = parser.parse_args(sys.argv[1:2])\n if not hasattr(self, args.command):\n print('Unrecognized command')\n parser.print_help()\n exit(1)\n # use dispatch pattern to invoke method with same name\n getattr(self, args.command)()\n\n def hw2p2(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', type=str, default=None)\n args = parser.parse_args(sys.argv[2:])\n hw2p2.submit(args.s)\n\nCLI()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class EntityEmailerInterface(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@staticmethod
def convert_events_to_emails():
"""
Converts unseen events to emails and marks them as seen.
"""
email_medium = get_medium()
default_from_email = get_from_email_address()
for event, targets in email_medium.events_targets(seen=False,
mark_seen=True):
from_address = event.context.get('from_address'
) or default_from_email
Email.objects.create_email(event=event, from_address=
from_address, recipients=targets)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EntityEmailerInterface(object):
<|reserved_special_token_0|>
@staticmethod
def send_unsent_scheduled_emails():
"""
Send out any scheduled emails that are unsent
"""
current_time = datetime.utcnow()
email_medium = get_medium()
to_send = Email.objects.filter(scheduled__lte=current_time,
sent__isnull=True).select_related('event').prefetch_related(
'recipients')
context_loader.load_contexts_and_renderers([e.event for e in
to_send], [email_medium])
emails = []
for email in to_send:
to_email_addresses = get_subscribed_email_addresses(email)
if to_email_addresses:
text_message, html_message = email.render(email_medium)
message = create_email_message(to_emails=to_email_addresses,
from_email=email.from_address or get_from_email_address
(), subject=email.subject or
extract_email_subject_from_html_content(html_message),
text=text_message, html=html_message)
emails.append(message)
connection = mail.get_connection()
connection.send_messages(emails)
to_send.update(sent=current_time)
@staticmethod
def convert_events_to_emails():
"""
Converts unseen events to emails and marks them as seen.
"""
email_medium = get_medium()
default_from_email = get_from_email_address()
for event, targets in email_medium.events_targets(seen=False,
mark_seen=True):
from_address = event.context.get('from_address'
) or default_from_email
Email.objects.create_email(event=event, from_address=
from_address, recipients=targets)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EntityEmailerInterface(object):
"""
An api interface to do things within entity emailer
"""
@staticmethod
def send_unsent_scheduled_emails():
"""
Send out any scheduled emails that are unsent
"""
current_time = datetime.utcnow()
email_medium = get_medium()
to_send = Email.objects.filter(scheduled__lte=current_time,
sent__isnull=True).select_related('event').prefetch_related(
'recipients')
context_loader.load_contexts_and_renderers([e.event for e in
to_send], [email_medium])
emails = []
for email in to_send:
to_email_addresses = get_subscribed_email_addresses(email)
if to_email_addresses:
text_message, html_message = email.render(email_medium)
message = create_email_message(to_emails=to_email_addresses,
from_email=email.from_address or get_from_email_address
(), subject=email.subject or
extract_email_subject_from_html_content(html_message),
text=text_message, html=html_message)
emails.append(message)
connection = mail.get_connection()
connection.send_messages(emails)
to_send.update(sent=current_time)
@staticmethod
def convert_events_to_emails():
"""
Converts unseen events to emails and marks them as seen.
"""
email_medium = get_medium()
default_from_email = get_from_email_address()
for event, targets in email_medium.events_targets(seen=False,
mark_seen=True):
from_address = event.context.get('from_address'
) or default_from_email
Email.objects.create_email(event=event, from_address=
from_address, recipients=targets)
<|reserved_special_token_1|>
from datetime import datetime
from django.core import mail
from entity_event import context_loader
from entity_emailer.models import Email
from entity_emailer.utils import get_medium, get_from_email_address, get_subscribed_email_addresses, create_email_message, extract_email_subject_from_html_content
class EntityEmailerInterface(object):
"""
An api interface to do things within entity emailer
"""
@staticmethod
def send_unsent_scheduled_emails():
"""
Send out any scheduled emails that are unsent
"""
current_time = datetime.utcnow()
email_medium = get_medium()
to_send = Email.objects.filter(scheduled__lte=current_time,
sent__isnull=True).select_related('event').prefetch_related(
'recipients')
context_loader.load_contexts_and_renderers([e.event for e in
to_send], [email_medium])
emails = []
for email in to_send:
to_email_addresses = get_subscribed_email_addresses(email)
if to_email_addresses:
text_message, html_message = email.render(email_medium)
message = create_email_message(to_emails=to_email_addresses,
from_email=email.from_address or get_from_email_address
(), subject=email.subject or
extract_email_subject_from_html_content(html_message),
text=text_message, html=html_message)
emails.append(message)
connection = mail.get_connection()
connection.send_messages(emails)
to_send.update(sent=current_time)
@staticmethod
def convert_events_to_emails():
"""
Converts unseen events to emails and marks them as seen.
"""
email_medium = get_medium()
default_from_email = get_from_email_address()
for event, targets in email_medium.events_targets(seen=False,
mark_seen=True):
from_address = event.context.get('from_address'
) or default_from_email
Email.objects.create_email(event=event, from_address=
from_address, recipients=targets)
<|reserved_special_token_1|>
from datetime import datetime
from django.core import mail
from entity_event import context_loader
from entity_emailer.models import Email
from entity_emailer.utils import get_medium, get_from_email_address, get_subscribed_email_addresses, \
create_email_message, extract_email_subject_from_html_content
class EntityEmailerInterface(object):
"""
An api interface to do things within entity emailer
"""
@staticmethod
def send_unsent_scheduled_emails():
"""
Send out any scheduled emails that are unsent
"""
current_time = datetime.utcnow()
email_medium = get_medium()
to_send = Email.objects.filter(
scheduled__lte=current_time,
sent__isnull=True
).select_related(
'event'
).prefetch_related(
'recipients'
)
# Fetch the contexts of every event so that they may be rendered
context_loader.load_contexts_and_renderers([e.event for e in to_send], [email_medium])
emails = []
for email in to_send:
to_email_addresses = get_subscribed_email_addresses(email)
if to_email_addresses:
text_message, html_message = email.render(email_medium)
message = create_email_message(
to_emails=to_email_addresses,
from_email=email.from_address or get_from_email_address(),
subject=email.subject or extract_email_subject_from_html_content(html_message),
text=text_message,
html=html_message,
)
emails.append(message)
connection = mail.get_connection()
connection.send_messages(emails)
to_send.update(sent=current_time)
@staticmethod
def convert_events_to_emails():
"""
Converts unseen events to emails and marks them as seen.
"""
# Get the email medium
email_medium = get_medium()
# Get the default from email
default_from_email = get_from_email_address()
# Find any unseen events and create unsent email objects
for event, targets in email_medium.events_targets(seen=False, mark_seen=True):
# Check the event's context for a from_address, otherwise fallback to default
from_address = event.context.get('from_address') or default_from_email
# Create the emails
Email.objects.create_email(event=event, from_address=from_address, recipients=targets)
|
flexible
|
{
"blob_id": "d1dc807ecc92d9108db2c9bd00ee9781e174a1aa",
"index": 558,
"step-1": "<mask token>\n\n\nclass EntityEmailerInterface(object):\n <mask token>\n <mask token>\n\n @staticmethod\n def convert_events_to_emails():\n \"\"\"\n Converts unseen events to emails and marks them as seen.\n \"\"\"\n email_medium = get_medium()\n default_from_email = get_from_email_address()\n for event, targets in email_medium.events_targets(seen=False,\n mark_seen=True):\n from_address = event.context.get('from_address'\n ) or default_from_email\n Email.objects.create_email(event=event, from_address=\n from_address, recipients=targets)\n",
"step-2": "<mask token>\n\n\nclass EntityEmailerInterface(object):\n <mask token>\n\n @staticmethod\n def send_unsent_scheduled_emails():\n \"\"\"\n Send out any scheduled emails that are unsent\n \"\"\"\n current_time = datetime.utcnow()\n email_medium = get_medium()\n to_send = Email.objects.filter(scheduled__lte=current_time,\n sent__isnull=True).select_related('event').prefetch_related(\n 'recipients')\n context_loader.load_contexts_and_renderers([e.event for e in\n to_send], [email_medium])\n emails = []\n for email in to_send:\n to_email_addresses = get_subscribed_email_addresses(email)\n if to_email_addresses:\n text_message, html_message = email.render(email_medium)\n message = create_email_message(to_emails=to_email_addresses,\n from_email=email.from_address or get_from_email_address\n (), subject=email.subject or\n extract_email_subject_from_html_content(html_message),\n text=text_message, html=html_message)\n emails.append(message)\n connection = mail.get_connection()\n connection.send_messages(emails)\n to_send.update(sent=current_time)\n\n @staticmethod\n def convert_events_to_emails():\n \"\"\"\n Converts unseen events to emails and marks them as seen.\n \"\"\"\n email_medium = get_medium()\n default_from_email = get_from_email_address()\n for event, targets in email_medium.events_targets(seen=False,\n mark_seen=True):\n from_address = event.context.get('from_address'\n ) or default_from_email\n Email.objects.create_email(event=event, from_address=\n from_address, recipients=targets)\n",
"step-3": "<mask token>\n\n\nclass EntityEmailerInterface(object):\n \"\"\"\n An api interface to do things within entity emailer\n \"\"\"\n\n @staticmethod\n def send_unsent_scheduled_emails():\n \"\"\"\n Send out any scheduled emails that are unsent\n \"\"\"\n current_time = datetime.utcnow()\n email_medium = get_medium()\n to_send = Email.objects.filter(scheduled__lte=current_time,\n sent__isnull=True).select_related('event').prefetch_related(\n 'recipients')\n context_loader.load_contexts_and_renderers([e.event for e in\n to_send], [email_medium])\n emails = []\n for email in to_send:\n to_email_addresses = get_subscribed_email_addresses(email)\n if to_email_addresses:\n text_message, html_message = email.render(email_medium)\n message = create_email_message(to_emails=to_email_addresses,\n from_email=email.from_address or get_from_email_address\n (), subject=email.subject or\n extract_email_subject_from_html_content(html_message),\n text=text_message, html=html_message)\n emails.append(message)\n connection = mail.get_connection()\n connection.send_messages(emails)\n to_send.update(sent=current_time)\n\n @staticmethod\n def convert_events_to_emails():\n \"\"\"\n Converts unseen events to emails and marks them as seen.\n \"\"\"\n email_medium = get_medium()\n default_from_email = get_from_email_address()\n for event, targets in email_medium.events_targets(seen=False,\n mark_seen=True):\n from_address = event.context.get('from_address'\n ) or default_from_email\n Email.objects.create_email(event=event, from_address=\n from_address, recipients=targets)\n",
"step-4": "from datetime import datetime\nfrom django.core import mail\nfrom entity_event import context_loader\nfrom entity_emailer.models import Email\nfrom entity_emailer.utils import get_medium, get_from_email_address, get_subscribed_email_addresses, create_email_message, extract_email_subject_from_html_content\n\n\nclass EntityEmailerInterface(object):\n \"\"\"\n An api interface to do things within entity emailer\n \"\"\"\n\n @staticmethod\n def send_unsent_scheduled_emails():\n \"\"\"\n Send out any scheduled emails that are unsent\n \"\"\"\n current_time = datetime.utcnow()\n email_medium = get_medium()\n to_send = Email.objects.filter(scheduled__lte=current_time,\n sent__isnull=True).select_related('event').prefetch_related(\n 'recipients')\n context_loader.load_contexts_and_renderers([e.event for e in\n to_send], [email_medium])\n emails = []\n for email in to_send:\n to_email_addresses = get_subscribed_email_addresses(email)\n if to_email_addresses:\n text_message, html_message = email.render(email_medium)\n message = create_email_message(to_emails=to_email_addresses,\n from_email=email.from_address or get_from_email_address\n (), subject=email.subject or\n extract_email_subject_from_html_content(html_message),\n text=text_message, html=html_message)\n emails.append(message)\n connection = mail.get_connection()\n connection.send_messages(emails)\n to_send.update(sent=current_time)\n\n @staticmethod\n def convert_events_to_emails():\n \"\"\"\n Converts unseen events to emails and marks them as seen.\n \"\"\"\n email_medium = get_medium()\n default_from_email = get_from_email_address()\n for event, targets in email_medium.events_targets(seen=False,\n mark_seen=True):\n from_address = event.context.get('from_address'\n ) or default_from_email\n Email.objects.create_email(event=event, from_address=\n from_address, recipients=targets)\n",
"step-5": "from datetime import datetime\n\nfrom django.core import mail\nfrom entity_event import context_loader\n\nfrom entity_emailer.models import Email\n\nfrom entity_emailer.utils import get_medium, get_from_email_address, get_subscribed_email_addresses, \\\n create_email_message, extract_email_subject_from_html_content\n\n\nclass EntityEmailerInterface(object):\n \"\"\"\n An api interface to do things within entity emailer\n \"\"\"\n\n @staticmethod\n def send_unsent_scheduled_emails():\n \"\"\"\n Send out any scheduled emails that are unsent\n \"\"\"\n\n current_time = datetime.utcnow()\n email_medium = get_medium()\n to_send = Email.objects.filter(\n scheduled__lte=current_time,\n sent__isnull=True\n ).select_related(\n 'event'\n ).prefetch_related(\n 'recipients'\n )\n\n # Fetch the contexts of every event so that they may be rendered\n context_loader.load_contexts_and_renderers([e.event for e in to_send], [email_medium])\n\n emails = []\n for email in to_send:\n to_email_addresses = get_subscribed_email_addresses(email)\n if to_email_addresses:\n text_message, html_message = email.render(email_medium)\n message = create_email_message(\n to_emails=to_email_addresses,\n from_email=email.from_address or get_from_email_address(),\n subject=email.subject or extract_email_subject_from_html_content(html_message),\n text=text_message,\n html=html_message,\n )\n emails.append(message)\n\n connection = mail.get_connection()\n connection.send_messages(emails)\n to_send.update(sent=current_time)\n\n @staticmethod\n def convert_events_to_emails():\n \"\"\"\n Converts unseen events to emails and marks them as seen.\n \"\"\"\n\n # Get the email medium\n email_medium = get_medium()\n\n # Get the default from email\n default_from_email = get_from_email_address()\n\n # Find any unseen events and create unsent email objects\n for event, targets in email_medium.events_targets(seen=False, mark_seen=True):\n\n # Check the event's context for a from_address, otherwise fallback to default\n from_address = event.context.get('from_address') or default_from_email\n\n # Create the emails\n Email.objects.create_email(event=event, from_address=from_address, recipients=targets)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import NLC
app = NLC.create_app()
if __name__ == '__main__':
app.run(port=NLC.port, debug=True)
|
normal
|
{
"blob_id": "de2de26d0c82213393e8174d1144c3510c63b899",
"index": 2515,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n app.run(port=NLC.port, debug=True)\n",
"step-3": "<mask token>\napp = NLC.create_app()\nif __name__ == '__main__':\n app.run(port=NLC.port, debug=True)\n",
"step-4": "import NLC\napp = NLC.create_app()\nif __name__ == '__main__':\n app.run(port=NLC.port, debug=True)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
while True:
print("running")
|
normal
|
{
"blob_id": "8917481957ecd4c9692cfa93df0b759feaa344af",
"index": 4944,
"step-1": "<mask token>\n",
"step-2": "while True:\n print('running')\n",
"step-3": "while True:\n print(\"running\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Environment:
@abstractmethod
def __init__(self, agent):
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Environment:
@abstractmethod
def __init__(self, agent):
pass
<|reserved_special_token_0|>
@abstractmethod
def execute_all(self):
pass
@abstractmethod
def set_delay(self, delay):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Environment:
@abstractmethod
def __init__(self, agent):
pass
@abstractmethod
def execute_step(self, n=1):
pass
@abstractmethod
def execute_all(self):
pass
@abstractmethod
def set_delay(self, delay):
pass
<|reserved_special_token_1|>
from abc import abstractmethod
class Environment:
@abstractmethod
def __init__(self, agent):
pass
@abstractmethod
def execute_step(self, n=1):
pass
@abstractmethod
def execute_all(self):
pass
@abstractmethod
def set_delay(self, delay):
pass
|
flexible
|
{
"blob_id": "8698aedc5c8671f46c73898a7188440254b79bbf",
"index": 307,
"step-1": "<mask token>\n\n\nclass Environment:\n\n @abstractmethod\n def __init__(self, agent):\n pass\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Environment:\n\n @abstractmethod\n def __init__(self, agent):\n pass\n <mask token>\n\n @abstractmethod\n def execute_all(self):\n pass\n\n @abstractmethod\n def set_delay(self, delay):\n pass\n",
"step-3": "<mask token>\n\n\nclass Environment:\n\n @abstractmethod\n def __init__(self, agent):\n pass\n\n @abstractmethod\n def execute_step(self, n=1):\n pass\n\n @abstractmethod\n def execute_all(self):\n pass\n\n @abstractmethod\n def set_delay(self, delay):\n pass\n",
"step-4": "from abc import abstractmethod\n\n\nclass Environment:\n\n @abstractmethod\n def __init__(self, agent):\n pass\n\n @abstractmethod\n def execute_step(self, n=1):\n pass\n\n @abstractmethod\n def execute_all(self):\n pass\n\n @abstractmethod\n def set_delay(self, delay):\n pass\n",
"step-5": null,
"step-ids": [
2,
4,
5,
6
]
}
|
[
2,
4,
5,
6
] |
<|reserved_special_token_0|>
class Messenger:
<|reserved_special_token_0|>
def add_message(self, message):
self.message_manager.add(message)
@list_returner
def get_room_messages(self):
messages = []
i = 6
found_messages = []
for message in self.message_manager.find({}, self.messages_count):
found_messages.append(Message(**message))
for message in sorted(found_messages):
message_dict = message.as_dict()
message_dict['id'] = i
messages.append(message_dict)
i += 1
return messages
def read_messages(self):
output = list()
messages = self.get_room_messages()
output.append('[' + ', '.join([str(message).replace("'", '"') for
message in messages]) + ']')
return output
<|reserved_special_token_0|>
def update_message(self, old_message, message):
self.message_manager.update(old_message, message)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Messenger:
def __init__(self, messages_count=20):
self.messages_count = messages_count
self.message_manager = DbManager()
def add_message(self, message):
self.message_manager.add(message)
@list_returner
def get_room_messages(self):
messages = []
i = 6
found_messages = []
for message in self.message_manager.find({}, self.messages_count):
found_messages.append(Message(**message))
for message in sorted(found_messages):
message_dict = message.as_dict()
message_dict['id'] = i
messages.append(message_dict)
i += 1
return messages
def read_messages(self):
output = list()
messages = self.get_room_messages()
output.append('[' + ', '.join([str(message).replace("'", '"') for
message in messages]) + ']')
return output
def delete_message(self, message):
try:
self.message_manager.delete(message)
except:
print('No message')
def update_message(self, old_message, message):
self.message_manager.update(old_message, message)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def list_returner(f):
def wrapper(*args, **kwargs):
result = f(*args, **kwargs)
if result:
return result
else:
return [dict()]
return wrapper
class Messenger:
def __init__(self, messages_count=20):
self.messages_count = messages_count
self.message_manager = DbManager()
def add_message(self, message):
self.message_manager.add(message)
@list_returner
def get_room_messages(self):
messages = []
i = 6
found_messages = []
for message in self.message_manager.find({}, self.messages_count):
found_messages.append(Message(**message))
for message in sorted(found_messages):
message_dict = message.as_dict()
message_dict['id'] = i
messages.append(message_dict)
i += 1
return messages
def read_messages(self):
output = list()
messages = self.get_room_messages()
output.append('[' + ', '.join([str(message).replace("'", '"') for
message in messages]) + ']')
return output
def delete_message(self, message):
try:
self.message_manager.delete(message)
except:
print('No message')
def update_message(self, old_message, message):
self.message_manager.update(old_message, message)
<|reserved_special_token_1|>
from dbmanager import DbManager
from message import Message
def list_returner(f):
def wrapper(*args, **kwargs):
result = f(*args, **kwargs)
if result:
return result
else:
return [dict()]
return wrapper
class Messenger:
def __init__(self, messages_count=20):
self.messages_count = messages_count
self.message_manager = DbManager()
def add_message(self, message):
self.message_manager.add(message)
@list_returner
def get_room_messages(self):
messages = []
i = 6
found_messages = []
for message in self.message_manager.find({}, self.messages_count):
found_messages.append(Message(**message))
for message in sorted(found_messages):
message_dict = message.as_dict()
message_dict['id'] = i
messages.append(message_dict)
i += 1
return messages
def read_messages(self):
output = list()
messages = self.get_room_messages()
output.append('[' + ', '.join([str(message).replace("'", '"') for
message in messages]) + ']')
return output
def delete_message(self, message):
try:
self.message_manager.delete(message)
except:
print('No message')
def update_message(self, old_message, message):
self.message_manager.update(old_message, message)
|
flexible
|
{
"blob_id": "4d1ea6522a01603f0159a1f27da70b65c4f387cb",
"index": 7093,
"step-1": "<mask token>\n\n\nclass Messenger:\n <mask token>\n\n def add_message(self, message):\n self.message_manager.add(message)\n\n @list_returner\n def get_room_messages(self):\n messages = []\n i = 6\n found_messages = []\n for message in self.message_manager.find({}, self.messages_count):\n found_messages.append(Message(**message))\n for message in sorted(found_messages):\n message_dict = message.as_dict()\n message_dict['id'] = i\n messages.append(message_dict)\n i += 1\n return messages\n\n def read_messages(self):\n output = list()\n messages = self.get_room_messages()\n output.append('[' + ', '.join([str(message).replace(\"'\", '\"') for\n message in messages]) + ']')\n return output\n <mask token>\n\n def update_message(self, old_message, message):\n self.message_manager.update(old_message, message)\n",
"step-2": "<mask token>\n\n\nclass Messenger:\n\n def __init__(self, messages_count=20):\n self.messages_count = messages_count\n self.message_manager = DbManager()\n\n def add_message(self, message):\n self.message_manager.add(message)\n\n @list_returner\n def get_room_messages(self):\n messages = []\n i = 6\n found_messages = []\n for message in self.message_manager.find({}, self.messages_count):\n found_messages.append(Message(**message))\n for message in sorted(found_messages):\n message_dict = message.as_dict()\n message_dict['id'] = i\n messages.append(message_dict)\n i += 1\n return messages\n\n def read_messages(self):\n output = list()\n messages = self.get_room_messages()\n output.append('[' + ', '.join([str(message).replace(\"'\", '\"') for\n message in messages]) + ']')\n return output\n\n def delete_message(self, message):\n try:\n self.message_manager.delete(message)\n except:\n print('No message')\n\n def update_message(self, old_message, message):\n self.message_manager.update(old_message, message)\n",
"step-3": "<mask token>\n\n\ndef list_returner(f):\n\n def wrapper(*args, **kwargs):\n result = f(*args, **kwargs)\n if result:\n return result\n else:\n return [dict()]\n return wrapper\n\n\nclass Messenger:\n\n def __init__(self, messages_count=20):\n self.messages_count = messages_count\n self.message_manager = DbManager()\n\n def add_message(self, message):\n self.message_manager.add(message)\n\n @list_returner\n def get_room_messages(self):\n messages = []\n i = 6\n found_messages = []\n for message in self.message_manager.find({}, self.messages_count):\n found_messages.append(Message(**message))\n for message in sorted(found_messages):\n message_dict = message.as_dict()\n message_dict['id'] = i\n messages.append(message_dict)\n i += 1\n return messages\n\n def read_messages(self):\n output = list()\n messages = self.get_room_messages()\n output.append('[' + ', '.join([str(message).replace(\"'\", '\"') for\n message in messages]) + ']')\n return output\n\n def delete_message(self, message):\n try:\n self.message_manager.delete(message)\n except:\n print('No message')\n\n def update_message(self, old_message, message):\n self.message_manager.update(old_message, message)\n",
"step-4": "from dbmanager import DbManager\nfrom message import Message\n\n\ndef list_returner(f):\n\n def wrapper(*args, **kwargs):\n result = f(*args, **kwargs)\n if result:\n return result\n else:\n return [dict()]\n return wrapper\n\n\nclass Messenger:\n\n def __init__(self, messages_count=20):\n self.messages_count = messages_count\n self.message_manager = DbManager()\n\n def add_message(self, message):\n self.message_manager.add(message)\n\n @list_returner\n def get_room_messages(self):\n messages = []\n i = 6\n found_messages = []\n for message in self.message_manager.find({}, self.messages_count):\n found_messages.append(Message(**message))\n for message in sorted(found_messages):\n message_dict = message.as_dict()\n message_dict['id'] = i\n messages.append(message_dict)\n i += 1\n return messages\n\n def read_messages(self):\n output = list()\n messages = self.get_room_messages()\n output.append('[' + ', '.join([str(message).replace(\"'\", '\"') for\n message in messages]) + ']')\n return output\n\n def delete_message(self, message):\n try:\n self.message_manager.delete(message)\n except:\n print('No message')\n\n def update_message(self, old_message, message):\n self.message_manager.update(old_message, message)\n",
"step-5": null,
"step-ids": [
5,
7,
8,
9
]
}
|
[
5,
7,
8,
9
] |
<|reserved_special_token_0|>
class TestClass(unittest.TestCase):
<|reserved_special_token_0|>
def test_入力例_1(self):
input = '1 0 1\n2 1 2\n1 0 1'
output = 'Yes'
self.assertIO(input, output)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = '1 0 1\n2 1 2\n1 0 1'
output = 'Yes'
self.assertIO(input, output)
def test_入力例_2(self):
input = '2 2 2\n2 1 2\n2 2 2'
output = 'No'
self.assertIO(input, output)
def test_入力例_3(self):
input = '0 8 8\n0 8 8\n0 8 8'
output = 'Yes'
self.assertIO(input, output)
def test_入力例_4(self):
input = '1 8 6\n2 9 7\n0 7 7'
output = 'No'
self.assertIO(input, output)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = '1 0 1\n2 1 2\n1 0 1'
output = 'Yes'
self.assertIO(input, output)
def test_入力例_2(self):
input = '2 2 2\n2 1 2\n2 2 2'
output = 'No'
self.assertIO(input, output)
def test_入力例_3(self):
input = '0 8 8\n0 8 8\n0 8 8'
output = 'Yes'
self.assertIO(input, output)
def test_入力例_4(self):
input = '1 8 6\n2 9 7\n0 7 7'
output = 'No'
self.assertIO(input, output)
def resolve():
c = []
for _ in range(3):
c.append(list(map(int, input().split())))
a1 = 0
b1 = c[0][0] - a1
b2 = c[0][1] - a1
b3 = c[0][2] - a1
a2 = c[1][0] - b1
a3 = c[2][0] - b1
if a2 + b2 == c[1][1] and a2 + b3 == c[1][2] and a3 + b2 == c[2][1
] and a3 + b3 == c[2][2]:
print('Yes')
else:
print('No')
if __name__ == '__main__':
resolve()
<|reserved_special_token_1|>
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = '1 0 1\n2 1 2\n1 0 1'
output = 'Yes'
self.assertIO(input, output)
def test_入力例_2(self):
input = '2 2 2\n2 1 2\n2 2 2'
output = 'No'
self.assertIO(input, output)
def test_入力例_3(self):
input = '0 8 8\n0 8 8\n0 8 8'
output = 'Yes'
self.assertIO(input, output)
def test_入力例_4(self):
input = '1 8 6\n2 9 7\n0 7 7'
output = 'No'
self.assertIO(input, output)
def resolve():
c = []
for _ in range(3):
c.append(list(map(int, input().split())))
a1 = 0
b1 = c[0][0] - a1
b2 = c[0][1] - a1
b3 = c[0][2] - a1
a2 = c[1][0] - b1
a3 = c[2][0] - b1
if a2 + b2 == c[1][1] and a2 + b3 == c[1][2] and a3 + b2 == c[2][1
] and a3 + b3 == c[2][2]:
print('Yes')
else:
print('No')
if __name__ == '__main__':
resolve()
<|reserved_special_token_1|>
#
# abc088 c
#
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = """1 0 1
2 1 2
1 0 1"""
output = """Yes"""
self.assertIO(input, output)
def test_入力例_2(self):
input = """2 2 2
2 1 2
2 2 2"""
output = """No"""
self.assertIO(input, output)
def test_入力例_3(self):
input = """0 8 8
0 8 8
0 8 8"""
output = """Yes"""
self.assertIO(input, output)
def test_入力例_4(self):
input = """1 8 6
2 9 7
0 7 7"""
output = """No"""
self.assertIO(input, output)
def resolve():
c = []
for _ in range(3):
c.append(list(map(int, input().split())))
a1 = 0
b1 = c[0][0] - a1
b2 = c[0][1] - a1
b3 = c[0][2] - a1
a2 = c[1][0] - b1
a3 = c[2][0] - b1
if a2+b2 == c[1][1] and a2+b3 == c[1][2] and a3+b2 == c[2][1] and a3+b3 == c[2][2]:
print("Yes")
else:
print("No")
if __name__ == "__main__":
# unittest.main()
resolve()
|
flexible
|
{
"blob_id": "8b97c1e14adfcb09806e2d37e2f5c4f0b356c009",
"index": 2742,
"step-1": "<mask token>\n\n\nclass TestClass(unittest.TestCase):\n <mask token>\n\n def test_入力例_1(self):\n input = '1 0 1\\n2 1 2\\n1 0 1'\n output = 'Yes'\n self.assertIO(input, output)\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestClass(unittest.TestCase):\n\n def assertIO(self, input, output):\n stdout, stdin = sys.stdout, sys.stdin\n sys.stdout, sys.stdin = StringIO(), StringIO(input)\n resolve()\n sys.stdout.seek(0)\n out = sys.stdout.read()[:-1]\n sys.stdout, sys.stdin = stdout, stdin\n self.assertEqual(out, output)\n\n def test_入力例_1(self):\n input = '1 0 1\\n2 1 2\\n1 0 1'\n output = 'Yes'\n self.assertIO(input, output)\n\n def test_入力例_2(self):\n input = '2 2 2\\n2 1 2\\n2 2 2'\n output = 'No'\n self.assertIO(input, output)\n\n def test_入力例_3(self):\n input = '0 8 8\\n0 8 8\\n0 8 8'\n output = 'Yes'\n self.assertIO(input, output)\n\n def test_入力例_4(self):\n input = '1 8 6\\n2 9 7\\n0 7 7'\n output = 'No'\n self.assertIO(input, output)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestClass(unittest.TestCase):\n\n def assertIO(self, input, output):\n stdout, stdin = sys.stdout, sys.stdin\n sys.stdout, sys.stdin = StringIO(), StringIO(input)\n resolve()\n sys.stdout.seek(0)\n out = sys.stdout.read()[:-1]\n sys.stdout, sys.stdin = stdout, stdin\n self.assertEqual(out, output)\n\n def test_入力例_1(self):\n input = '1 0 1\\n2 1 2\\n1 0 1'\n output = 'Yes'\n self.assertIO(input, output)\n\n def test_入力例_2(self):\n input = '2 2 2\\n2 1 2\\n2 2 2'\n output = 'No'\n self.assertIO(input, output)\n\n def test_入力例_3(self):\n input = '0 8 8\\n0 8 8\\n0 8 8'\n output = 'Yes'\n self.assertIO(input, output)\n\n def test_入力例_4(self):\n input = '1 8 6\\n2 9 7\\n0 7 7'\n output = 'No'\n self.assertIO(input, output)\n\n\ndef resolve():\n c = []\n for _ in range(3):\n c.append(list(map(int, input().split())))\n a1 = 0\n b1 = c[0][0] - a1\n b2 = c[0][1] - a1\n b3 = c[0][2] - a1\n a2 = c[1][0] - b1\n a3 = c[2][0] - b1\n if a2 + b2 == c[1][1] and a2 + b3 == c[1][2] and a3 + b2 == c[2][1\n ] and a3 + b3 == c[2][2]:\n print('Yes')\n else:\n print('No')\n\n\nif __name__ == '__main__':\n resolve()\n",
"step-4": "import sys\nfrom io import StringIO\nimport unittest\n\n\nclass TestClass(unittest.TestCase):\n\n def assertIO(self, input, output):\n stdout, stdin = sys.stdout, sys.stdin\n sys.stdout, sys.stdin = StringIO(), StringIO(input)\n resolve()\n sys.stdout.seek(0)\n out = sys.stdout.read()[:-1]\n sys.stdout, sys.stdin = stdout, stdin\n self.assertEqual(out, output)\n\n def test_入力例_1(self):\n input = '1 0 1\\n2 1 2\\n1 0 1'\n output = 'Yes'\n self.assertIO(input, output)\n\n def test_入力例_2(self):\n input = '2 2 2\\n2 1 2\\n2 2 2'\n output = 'No'\n self.assertIO(input, output)\n\n def test_入力例_3(self):\n input = '0 8 8\\n0 8 8\\n0 8 8'\n output = 'Yes'\n self.assertIO(input, output)\n\n def test_入力例_4(self):\n input = '1 8 6\\n2 9 7\\n0 7 7'\n output = 'No'\n self.assertIO(input, output)\n\n\ndef resolve():\n c = []\n for _ in range(3):\n c.append(list(map(int, input().split())))\n a1 = 0\n b1 = c[0][0] - a1\n b2 = c[0][1] - a1\n b3 = c[0][2] - a1\n a2 = c[1][0] - b1\n a3 = c[2][0] - b1\n if a2 + b2 == c[1][1] and a2 + b3 == c[1][2] and a3 + b2 == c[2][1\n ] and a3 + b3 == c[2][2]:\n print('Yes')\n else:\n print('No')\n\n\nif __name__ == '__main__':\n resolve()\n",
"step-5": "#\n# abc088 c\n#\nimport sys\nfrom io import StringIO\nimport unittest\n\n\nclass TestClass(unittest.TestCase):\n def assertIO(self, input, output):\n stdout, stdin = sys.stdout, sys.stdin\n sys.stdout, sys.stdin = StringIO(), StringIO(input)\n resolve()\n sys.stdout.seek(0)\n out = sys.stdout.read()[:-1]\n sys.stdout, sys.stdin = stdout, stdin\n self.assertEqual(out, output)\n\n def test_入力例_1(self):\n input = \"\"\"1 0 1\n2 1 2\n1 0 1\"\"\"\n output = \"\"\"Yes\"\"\"\n self.assertIO(input, output)\n\n def test_入力例_2(self):\n input = \"\"\"2 2 2\n2 1 2\n2 2 2\"\"\"\n output = \"\"\"No\"\"\"\n self.assertIO(input, output)\n\n def test_入力例_3(self):\n input = \"\"\"0 8 8\n0 8 8\n0 8 8\"\"\"\n output = \"\"\"Yes\"\"\"\n self.assertIO(input, output)\n\n def test_入力例_4(self):\n input = \"\"\"1 8 6\n2 9 7\n0 7 7\"\"\"\n output = \"\"\"No\"\"\"\n self.assertIO(input, output)\n\n\ndef resolve():\n c = []\n for _ in range(3):\n c.append(list(map(int, input().split())))\n\n a1 = 0\n b1 = c[0][0] - a1\n b2 = c[0][1] - a1\n b3 = c[0][2] - a1\n a2 = c[1][0] - b1\n a3 = c[2][0] - b1\n\n if a2+b2 == c[1][1] and a2+b3 == c[1][2] and a3+b2 == c[2][1] and a3+b3 == c[2][2]:\n print(\"Yes\")\n else:\n print(\"No\")\n\n\nif __name__ == \"__main__\":\n # unittest.main()\n resolve()\n",
"step-ids": [
2,
6,
8,
9,
10
]
}
|
[
2,
6,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('list/', MusicListView, name='music_list'), path(
'play/<str:name>/', MusicPlayView, name='play_music'), path('pause/',
MusicPauseView, name='pause_music'), path('unpause/', MusicUnPauseView,
name='unpause_music'), path('nextsong/', NextSongView, name='next_song'
), path('prevsong/', PreviousSongView, name='previous_song')]
<|reserved_special_token_1|>
from django.urls import path
from player.views import MusicListView, MusicPlayView, MusicPauseView, MusicUnPauseView, NextSongView, PreviousSongView
urlpatterns = [path('list/', MusicListView, name='music_list'), path(
'play/<str:name>/', MusicPlayView, name='play_music'), path('pause/',
MusicPauseView, name='pause_music'), path('unpause/', MusicUnPauseView,
name='unpause_music'), path('nextsong/', NextSongView, name='next_song'
), path('prevsong/', PreviousSongView, name='previous_song')]
<|reserved_special_token_1|>
from django.urls import path
from player.views import (
MusicListView, MusicPlayView, MusicPauseView, MusicUnPauseView,
NextSongView, PreviousSongView
)
urlpatterns = [
path('list/', MusicListView, name="music_list"),
path('play/<str:name>/', MusicPlayView, name="play_music"),
path('pause/', MusicPauseView, name="pause_music"),
path('unpause/', MusicUnPauseView, name="unpause_music"),
path('nextsong/', NextSongView, name="next_song"),
path('prevsong/', PreviousSongView, name="previous_song"),
]
|
flexible
|
{
"blob_id": "f23b002ec0eefa376890e255b1ac0137e3a1c989",
"index": 5338,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('list/', MusicListView, name='music_list'), path(\n 'play/<str:name>/', MusicPlayView, name='play_music'), path('pause/',\n MusicPauseView, name='pause_music'), path('unpause/', MusicUnPauseView,\n name='unpause_music'), path('nextsong/', NextSongView, name='next_song'\n ), path('prevsong/', PreviousSongView, name='previous_song')]\n",
"step-3": "from django.urls import path\nfrom player.views import MusicListView, MusicPlayView, MusicPauseView, MusicUnPauseView, NextSongView, PreviousSongView\nurlpatterns = [path('list/', MusicListView, name='music_list'), path(\n 'play/<str:name>/', MusicPlayView, name='play_music'), path('pause/',\n MusicPauseView, name='pause_music'), path('unpause/', MusicUnPauseView,\n name='unpause_music'), path('nextsong/', NextSongView, name='next_song'\n ), path('prevsong/', PreviousSongView, name='previous_song')]\n",
"step-4": "from django.urls import path\n\nfrom player.views import (\n MusicListView, MusicPlayView, MusicPauseView, MusicUnPauseView,\n NextSongView, PreviousSongView\n)\n\nurlpatterns = [\n path('list/', MusicListView, name=\"music_list\"),\n path('play/<str:name>/', MusicPlayView, name=\"play_music\"),\n path('pause/', MusicPauseView, name=\"pause_music\"),\n path('unpause/', MusicUnPauseView, name=\"unpause_music\"),\n path('nextsong/', NextSongView, name=\"next_song\"),\n path('prevsong/', PreviousSongView, name=\"previous_song\"),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def euclidean_dist(a: pd.Series, b: pd.Series):
diff = a.sub(other=b)
squares = diff ** 2
dist = 0
for feature_distance in squares:
if not math.isnan(feature_distance):
dist += feature_distance
return math.sqrt(dist)
def choose_centroids(data_copy: pd.DataFrame):
new_centroids = []
for i in range(0, k):
distance_scores = []
if i != 0:
for j in new_centroids:
distances = []
for row in data_copy.iterrows():
distances.append((euclidean_dist(j, row[1]), row[0]))
distances.sort()
distance_scores.append(distances[-1])
distance_scores.sort()
centroid_index = distance_scores[-1][1]
else:
centroid_index = random.randrange(num_rows)
data_copy.drop(labels=centroid_index, axis=0, inplace=True)
new_centroids.append(data.iloc[centroid_index])
return new_centroids
def assign_centroids():
cluster_ids = []
cluster_dict = {}
counter = 0
for i in centroids:
if i.name is None:
i.name = counter
cluster_dict[i.name] = counter
counter += 1
for row in data.iterrows():
distances = []
for j in centroids:
dist = euclidean_dist(row[1], j)
if dist != 0:
distances.append((dist, j.name))
distances.sort()
cluster_ids.append(cluster_dict[distances[0][1]])
try:
data.insert(6, 'ClusterID', cluster_ids)
except ValueError:
data.drop(columns='ClusterID', axis=1, inplace=True)
data.insert(6, 'ClusterID', cluster_ids)
except IndexError:
data.drop(columns='ClusterID', axis=1, inplace=True)
data.insert(6, 'ClusterID', cluster_ids)
return cluster_ids
def recalculate_clusters():
for i in range(0, k):
cluster = pd.DataFrame()
for item in data.iterrows():
if item[1].loc['ClusterID'] == i:
cluster = cluster.append(other=item[1])
centroids[i] = cluster.mean()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def euclidean_dist(a: pd.Series, b: pd.Series):
diff = a.sub(other=b)
squares = diff ** 2
dist = 0
for feature_distance in squares:
if not math.isnan(feature_distance):
dist += feature_distance
return math.sqrt(dist)
def choose_centroids(data_copy: pd.DataFrame):
new_centroids = []
for i in range(0, k):
distance_scores = []
if i != 0:
for j in new_centroids:
distances = []
for row in data_copy.iterrows():
distances.append((euclidean_dist(j, row[1]), row[0]))
distances.sort()
distance_scores.append(distances[-1])
distance_scores.sort()
centroid_index = distance_scores[-1][1]
else:
centroid_index = random.randrange(num_rows)
data_copy.drop(labels=centroid_index, axis=0, inplace=True)
new_centroids.append(data.iloc[centroid_index])
return new_centroids
def assign_centroids():
cluster_ids = []
cluster_dict = {}
counter = 0
for i in centroids:
if i.name is None:
i.name = counter
cluster_dict[i.name] = counter
counter += 1
for row in data.iterrows():
distances = []
for j in centroids:
dist = euclidean_dist(row[1], j)
if dist != 0:
distances.append((dist, j.name))
distances.sort()
cluster_ids.append(cluster_dict[distances[0][1]])
try:
data.insert(6, 'ClusterID', cluster_ids)
except ValueError:
data.drop(columns='ClusterID', axis=1, inplace=True)
data.insert(6, 'ClusterID', cluster_ids)
except IndexError:
data.drop(columns='ClusterID', axis=1, inplace=True)
data.insert(6, 'ClusterID', cluster_ids)
return cluster_ids
def recalculate_clusters():
for i in range(0, k):
cluster = pd.DataFrame()
for item in data.iterrows():
if item[1].loc['ClusterID'] == i:
cluster = cluster.append(other=item[1])
centroids[i] = cluster.mean()
<|reserved_special_token_0|>
for i in data.iterrows():
try:
tree_count_dict[i[1]['tree_genus']]
except KeyError:
tree_count_dict[i[1]['tree_genus']] = counter
counter += 1
<|reserved_special_token_0|>
print(data)
<|reserved_special_token_0|>
for iterations in range(0, 100):
print('Clustering Progress: [', iterations + 1, '/ 100 ]')
previous_assignments = cluster_assignments.copy()
cluster_assignments = assign_centroids()
recalculate_clusters()
if previous_assignments == cluster_assignments and len(previous_assignments
) > 0:
unchanged_iteration_count += 1
else:
unchanged_iteration_count = 0
if unchanged_iteration_count > 3:
print(
"Exiting early: cluster assignments haven't changed in 3 iterations"
)
break
print("""
Cluster Counts ( k =""", k, '):')
for i in range(0, k):
print('Cluster', i + 1, ': ', cluster_assignments.count(i))
print('\n\n', data)
data.to_csv('./data/fire_data_2011_clustered.csv')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def euclidean_dist(a: pd.Series, b: pd.Series):
diff = a.sub(other=b)
squares = diff ** 2
dist = 0
for feature_distance in squares:
if not math.isnan(feature_distance):
dist += feature_distance
return math.sqrt(dist)
def choose_centroids(data_copy: pd.DataFrame):
new_centroids = []
for i in range(0, k):
distance_scores = []
if i != 0:
for j in new_centroids:
distances = []
for row in data_copy.iterrows():
distances.append((euclidean_dist(j, row[1]), row[0]))
distances.sort()
distance_scores.append(distances[-1])
distance_scores.sort()
centroid_index = distance_scores[-1][1]
else:
centroid_index = random.randrange(num_rows)
data_copy.drop(labels=centroid_index, axis=0, inplace=True)
new_centroids.append(data.iloc[centroid_index])
return new_centroids
def assign_centroids():
cluster_ids = []
cluster_dict = {}
counter = 0
for i in centroids:
if i.name is None:
i.name = counter
cluster_dict[i.name] = counter
counter += 1
for row in data.iterrows():
distances = []
for j in centroids:
dist = euclidean_dist(row[1], j)
if dist != 0:
distances.append((dist, j.name))
distances.sort()
cluster_ids.append(cluster_dict[distances[0][1]])
try:
data.insert(6, 'ClusterID', cluster_ids)
except ValueError:
data.drop(columns='ClusterID', axis=1, inplace=True)
data.insert(6, 'ClusterID', cluster_ids)
except IndexError:
data.drop(columns='ClusterID', axis=1, inplace=True)
data.insert(6, 'ClusterID', cluster_ids)
return cluster_ids
def recalculate_clusters():
for i in range(0, k):
cluster = pd.DataFrame()
for item in data.iterrows():
if item[1].loc['ClusterID'] == i:
cluster = cluster.append(other=item[1])
centroids[i] = cluster.mean()
data = pd.read_csv('data/fire_data_2011.csv')
counter = 0
tree_count_dict = {}
for i in data.iterrows():
try:
tree_count_dict[i[1]['tree_genus']]
except KeyError:
tree_count_dict[i[1]['tree_genus']] = counter
counter += 1
data = data.copy().replace(to_replace=tree_count_dict)
print(data)
k = 7
num_rows = data.iloc[-1].name
centroids = choose_centroids(data.copy())
cluster_assignments = []
unchanged_iteration_count = 0
for iterations in range(0, 100):
print('Clustering Progress: [', iterations + 1, '/ 100 ]')
previous_assignments = cluster_assignments.copy()
cluster_assignments = assign_centroids()
recalculate_clusters()
if previous_assignments == cluster_assignments and len(previous_assignments
) > 0:
unchanged_iteration_count += 1
else:
unchanged_iteration_count = 0
if unchanged_iteration_count > 3:
print(
"Exiting early: cluster assignments haven't changed in 3 iterations"
)
break
print("""
Cluster Counts ( k =""", k, '):')
for i in range(0, k):
print('Cluster', i + 1, ': ', cluster_assignments.count(i))
print('\n\n', data)
data.to_csv('./data/fire_data_2011_clustered.csv')
<|reserved_special_token_1|>
import pandas as pd
import random
import math
def euclidean_dist(a: pd.Series, b: pd.Series):
diff = a.sub(other=b)
squares = diff ** 2
dist = 0
for feature_distance in squares:
if not math.isnan(feature_distance):
dist += feature_distance
return math.sqrt(dist)
def choose_centroids(data_copy: pd.DataFrame):
new_centroids = []
for i in range(0, k):
distance_scores = []
if i != 0:
for j in new_centroids:
distances = []
for row in data_copy.iterrows():
distances.append((euclidean_dist(j, row[1]), row[0]))
distances.sort()
distance_scores.append(distances[-1])
distance_scores.sort()
centroid_index = distance_scores[-1][1]
else:
centroid_index = random.randrange(num_rows)
data_copy.drop(labels=centroid_index, axis=0, inplace=True)
new_centroids.append(data.iloc[centroid_index])
return new_centroids
def assign_centroids():
cluster_ids = []
cluster_dict = {}
counter = 0
for i in centroids:
if i.name is None:
i.name = counter
cluster_dict[i.name] = counter
counter += 1
for row in data.iterrows():
distances = []
for j in centroids:
dist = euclidean_dist(row[1], j)
if dist != 0:
distances.append((dist, j.name))
distances.sort()
cluster_ids.append(cluster_dict[distances[0][1]])
try:
data.insert(6, 'ClusterID', cluster_ids)
except ValueError:
data.drop(columns='ClusterID', axis=1, inplace=True)
data.insert(6, 'ClusterID', cluster_ids)
except IndexError:
data.drop(columns='ClusterID', axis=1, inplace=True)
data.insert(6, 'ClusterID', cluster_ids)
return cluster_ids
def recalculate_clusters():
for i in range(0, k):
cluster = pd.DataFrame()
for item in data.iterrows():
if item[1].loc['ClusterID'] == i:
cluster = cluster.append(other=item[1])
centroids[i] = cluster.mean()
data = pd.read_csv('data/fire_data_2011.csv')
counter = 0
tree_count_dict = {}
for i in data.iterrows():
try:
tree_count_dict[i[1]['tree_genus']]
except KeyError:
tree_count_dict[i[1]['tree_genus']] = counter
counter += 1
data = data.copy().replace(to_replace=tree_count_dict)
print(data)
k = 7
num_rows = data.iloc[-1].name
centroids = choose_centroids(data.copy())
cluster_assignments = []
unchanged_iteration_count = 0
for iterations in range(0, 100):
print('Clustering Progress: [', iterations + 1, '/ 100 ]')
previous_assignments = cluster_assignments.copy()
cluster_assignments = assign_centroids()
recalculate_clusters()
if previous_assignments == cluster_assignments and len(previous_assignments
) > 0:
unchanged_iteration_count += 1
else:
unchanged_iteration_count = 0
if unchanged_iteration_count > 3:
print(
"Exiting early: cluster assignments haven't changed in 3 iterations"
)
break
print("""
Cluster Counts ( k =""", k, '):')
for i in range(0, k):
print('Cluster', i + 1, ': ', cluster_assignments.count(i))
print('\n\n', data)
data.to_csv('./data/fire_data_2011_clustered.csv')
<|reserved_special_token_1|>
import pandas as pd
import random
import math
# takes 2 row series and calculates the distances between them
def euclidean_dist(a: pd.Series, b: pd.Series):
diff = a.sub(other=b)
squares = diff ** 2
dist = 0
for feature_distance in squares:
if not math.isnan(feature_distance):
dist += feature_distance
return math.sqrt(dist)
# takes copy of dataframe; returns initialized centroid array
def choose_centroids(data_copy: pd.DataFrame):
new_centroids = []
# randomly picks k centroids
for i in range(0, k):
distance_scores = []
# picks furthest centroid from each other if the first one has been picked; else picks a random initial point
if i != 0:
for j in new_centroids:
distances = []
# for j existing centroids, compare to all other points and selects from all of j for next centroid
for row in data_copy.iterrows():
distances.append((euclidean_dist(j, row[1]), row[0]))
distances.sort()
distance_scores.append(distances[-1])
distance_scores.sort()
centroid_index = distance_scores[-1][1]
else:
centroid_index = random.randrange(num_rows)
# drops centroid from copied dataframe to avoid duplicates
data_copy.drop(labels=centroid_index, axis=0, inplace=True)
# appends the newly selected centroid to the list
new_centroids.append(data.iloc[centroid_index])
return new_centroids
def assign_centroids():
cluster_ids = [] # array for storing column output
cluster_dict = {} # dict for mapping centroid IDs (i.e. 89, 102, 34, etc.) to (0, 1, 2, ..., k)
counter = 0
for i in centroids:
if i.name is None:
i.name = counter
cluster_dict[i.name] = counter
counter += 1 # crude way of assigning centroid IDs
for row in data.iterrows():
distances = []
for j in centroids:
dist = euclidean_dist(row[1], j)
if dist != 0:
distances.append((dist, j.name))
distances.sort()
cluster_ids.append(cluster_dict[distances[0][1]])
# inserts cluster assignment column;
# if column already exists, catches exception and removes the column before insertion
try:
data.insert(6, "ClusterID", cluster_ids)
except ValueError:
data.drop(columns="ClusterID", axis=1, inplace=True)
data.insert(6, "ClusterID", cluster_ids)
except IndexError:
data.drop(columns="ClusterID", axis=1, inplace=True)
data.insert(6, "ClusterID", cluster_ids)
return cluster_ids
def recalculate_clusters():
# for k centroids, take the mean of all values belonging to the centroid and make that point the new centroid
for i in range(0, k):
cluster = pd.DataFrame()
for item in data.iterrows():
if item[1].loc['ClusterID'] == i:
cluster = cluster.append(other=item[1])
centroids[i] = cluster.mean()
data = pd.read_csv("data/fire_data_2011.csv")
# uses a dict to convert from tree genus i.e. "Pinu", "Pice",... to 0, 1,...
counter = 0
tree_count_dict = {}
for i in data.iterrows():
try:
tree_count_dict[i[1]["tree_genus"]]
except KeyError:
tree_count_dict[i[1]["tree_genus"]] = counter
counter += 1
data = data.copy().replace(to_replace=tree_count_dict)
print(data)
k = 7
num_rows = data.iloc[-1].name # gets label of the last row to figure out how many instances are in the data
# giving temporary copy of data so selected values can be removed so there aren't duplicate centroids
centroids = choose_centroids(data.copy())
cluster_assignments = []
unchanged_iteration_count = 0
for iterations in range(0, 100):
print("Clustering Progress: [", iterations + 1, "/ 100 ]")
# update previous cluster assignments; reassign cluster IDs and recalculate centroids
previous_assignments = cluster_assignments.copy()
cluster_assignments = assign_centroids()
recalculate_clusters()
# checks if cluster assignments have changed from one iteration to another
if previous_assignments == cluster_assignments and len(previous_assignments) > 0:
unchanged_iteration_count += 1
else:
unchanged_iteration_count = 0
# if cluster assignments haven't changed in 3 iterations, break from loop and exit
if unchanged_iteration_count > 3:
print("Exiting early: cluster assignments haven't changed in 3 iterations")
break
print("\nCluster Counts ( k =", k, "):")
for i in range(0, k):
print("Cluster", i + 1, ": ", cluster_assignments.count(i))
print("\n\n", data)
data.to_csv("./data/fire_data_2011_clustered.csv")
|
flexible
|
{
"blob_id": "46b51f46f6ed73e3b9dc2f759535ba71facd2aae",
"index": 5712,
"step-1": "<mask token>\n\n\ndef euclidean_dist(a: pd.Series, b: pd.Series):\n diff = a.sub(other=b)\n squares = diff ** 2\n dist = 0\n for feature_distance in squares:\n if not math.isnan(feature_distance):\n dist += feature_distance\n return math.sqrt(dist)\n\n\ndef choose_centroids(data_copy: pd.DataFrame):\n new_centroids = []\n for i in range(0, k):\n distance_scores = []\n if i != 0:\n for j in new_centroids:\n distances = []\n for row in data_copy.iterrows():\n distances.append((euclidean_dist(j, row[1]), row[0]))\n distances.sort()\n distance_scores.append(distances[-1])\n distance_scores.sort()\n centroid_index = distance_scores[-1][1]\n else:\n centroid_index = random.randrange(num_rows)\n data_copy.drop(labels=centroid_index, axis=0, inplace=True)\n new_centroids.append(data.iloc[centroid_index])\n return new_centroids\n\n\ndef assign_centroids():\n cluster_ids = []\n cluster_dict = {}\n counter = 0\n for i in centroids:\n if i.name is None:\n i.name = counter\n cluster_dict[i.name] = counter\n counter += 1\n for row in data.iterrows():\n distances = []\n for j in centroids:\n dist = euclidean_dist(row[1], j)\n if dist != 0:\n distances.append((dist, j.name))\n distances.sort()\n cluster_ids.append(cluster_dict[distances[0][1]])\n try:\n data.insert(6, 'ClusterID', cluster_ids)\n except ValueError:\n data.drop(columns='ClusterID', axis=1, inplace=True)\n data.insert(6, 'ClusterID', cluster_ids)\n except IndexError:\n data.drop(columns='ClusterID', axis=1, inplace=True)\n data.insert(6, 'ClusterID', cluster_ids)\n return cluster_ids\n\n\ndef recalculate_clusters():\n for i in range(0, k):\n cluster = pd.DataFrame()\n for item in data.iterrows():\n if item[1].loc['ClusterID'] == i:\n cluster = cluster.append(other=item[1])\n centroids[i] = cluster.mean()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef euclidean_dist(a: pd.Series, b: pd.Series):\n diff = a.sub(other=b)\n squares = diff ** 2\n dist = 0\n for feature_distance in squares:\n if not math.isnan(feature_distance):\n dist += feature_distance\n return math.sqrt(dist)\n\n\ndef choose_centroids(data_copy: pd.DataFrame):\n new_centroids = []\n for i in range(0, k):\n distance_scores = []\n if i != 0:\n for j in new_centroids:\n distances = []\n for row in data_copy.iterrows():\n distances.append((euclidean_dist(j, row[1]), row[0]))\n distances.sort()\n distance_scores.append(distances[-1])\n distance_scores.sort()\n centroid_index = distance_scores[-1][1]\n else:\n centroid_index = random.randrange(num_rows)\n data_copy.drop(labels=centroid_index, axis=0, inplace=True)\n new_centroids.append(data.iloc[centroid_index])\n return new_centroids\n\n\ndef assign_centroids():\n cluster_ids = []\n cluster_dict = {}\n counter = 0\n for i in centroids:\n if i.name is None:\n i.name = counter\n cluster_dict[i.name] = counter\n counter += 1\n for row in data.iterrows():\n distances = []\n for j in centroids:\n dist = euclidean_dist(row[1], j)\n if dist != 0:\n distances.append((dist, j.name))\n distances.sort()\n cluster_ids.append(cluster_dict[distances[0][1]])\n try:\n data.insert(6, 'ClusterID', cluster_ids)\n except ValueError:\n data.drop(columns='ClusterID', axis=1, inplace=True)\n data.insert(6, 'ClusterID', cluster_ids)\n except IndexError:\n data.drop(columns='ClusterID', axis=1, inplace=True)\n data.insert(6, 'ClusterID', cluster_ids)\n return cluster_ids\n\n\ndef recalculate_clusters():\n for i in range(0, k):\n cluster = pd.DataFrame()\n for item in data.iterrows():\n if item[1].loc['ClusterID'] == i:\n cluster = cluster.append(other=item[1])\n centroids[i] = cluster.mean()\n\n\n<mask token>\nfor i in data.iterrows():\n try:\n tree_count_dict[i[1]['tree_genus']]\n except KeyError:\n tree_count_dict[i[1]['tree_genus']] = counter\n counter += 1\n<mask token>\nprint(data)\n<mask token>\nfor iterations in range(0, 100):\n print('Clustering Progress: [', iterations + 1, '/ 100 ]')\n previous_assignments = cluster_assignments.copy()\n cluster_assignments = assign_centroids()\n recalculate_clusters()\n if previous_assignments == cluster_assignments and len(previous_assignments\n ) > 0:\n unchanged_iteration_count += 1\n else:\n unchanged_iteration_count = 0\n if unchanged_iteration_count > 3:\n print(\n \"Exiting early: cluster assignments haven't changed in 3 iterations\"\n )\n break\nprint(\"\"\"\nCluster Counts ( k =\"\"\", k, '):')\nfor i in range(0, k):\n print('Cluster', i + 1, ': ', cluster_assignments.count(i))\nprint('\\n\\n', data)\ndata.to_csv('./data/fire_data_2011_clustered.csv')\n",
"step-3": "<mask token>\n\n\ndef euclidean_dist(a: pd.Series, b: pd.Series):\n diff = a.sub(other=b)\n squares = diff ** 2\n dist = 0\n for feature_distance in squares:\n if not math.isnan(feature_distance):\n dist += feature_distance\n return math.sqrt(dist)\n\n\ndef choose_centroids(data_copy: pd.DataFrame):\n new_centroids = []\n for i in range(0, k):\n distance_scores = []\n if i != 0:\n for j in new_centroids:\n distances = []\n for row in data_copy.iterrows():\n distances.append((euclidean_dist(j, row[1]), row[0]))\n distances.sort()\n distance_scores.append(distances[-1])\n distance_scores.sort()\n centroid_index = distance_scores[-1][1]\n else:\n centroid_index = random.randrange(num_rows)\n data_copy.drop(labels=centroid_index, axis=0, inplace=True)\n new_centroids.append(data.iloc[centroid_index])\n return new_centroids\n\n\ndef assign_centroids():\n cluster_ids = []\n cluster_dict = {}\n counter = 0\n for i in centroids:\n if i.name is None:\n i.name = counter\n cluster_dict[i.name] = counter\n counter += 1\n for row in data.iterrows():\n distances = []\n for j in centroids:\n dist = euclidean_dist(row[1], j)\n if dist != 0:\n distances.append((dist, j.name))\n distances.sort()\n cluster_ids.append(cluster_dict[distances[0][1]])\n try:\n data.insert(6, 'ClusterID', cluster_ids)\n except ValueError:\n data.drop(columns='ClusterID', axis=1, inplace=True)\n data.insert(6, 'ClusterID', cluster_ids)\n except IndexError:\n data.drop(columns='ClusterID', axis=1, inplace=True)\n data.insert(6, 'ClusterID', cluster_ids)\n return cluster_ids\n\n\ndef recalculate_clusters():\n for i in range(0, k):\n cluster = pd.DataFrame()\n for item in data.iterrows():\n if item[1].loc['ClusterID'] == i:\n cluster = cluster.append(other=item[1])\n centroids[i] = cluster.mean()\n\n\ndata = pd.read_csv('data/fire_data_2011.csv')\ncounter = 0\ntree_count_dict = {}\nfor i in data.iterrows():\n try:\n tree_count_dict[i[1]['tree_genus']]\n except KeyError:\n tree_count_dict[i[1]['tree_genus']] = counter\n counter += 1\ndata = data.copy().replace(to_replace=tree_count_dict)\nprint(data)\nk = 7\nnum_rows = data.iloc[-1].name\ncentroids = choose_centroids(data.copy())\ncluster_assignments = []\nunchanged_iteration_count = 0\nfor iterations in range(0, 100):\n print('Clustering Progress: [', iterations + 1, '/ 100 ]')\n previous_assignments = cluster_assignments.copy()\n cluster_assignments = assign_centroids()\n recalculate_clusters()\n if previous_assignments == cluster_assignments and len(previous_assignments\n ) > 0:\n unchanged_iteration_count += 1\n else:\n unchanged_iteration_count = 0\n if unchanged_iteration_count > 3:\n print(\n \"Exiting early: cluster assignments haven't changed in 3 iterations\"\n )\n break\nprint(\"\"\"\nCluster Counts ( k =\"\"\", k, '):')\nfor i in range(0, k):\n print('Cluster', i + 1, ': ', cluster_assignments.count(i))\nprint('\\n\\n', data)\ndata.to_csv('./data/fire_data_2011_clustered.csv')\n",
"step-4": "import pandas as pd\nimport random\nimport math\n\n\ndef euclidean_dist(a: pd.Series, b: pd.Series):\n diff = a.sub(other=b)\n squares = diff ** 2\n dist = 0\n for feature_distance in squares:\n if not math.isnan(feature_distance):\n dist += feature_distance\n return math.sqrt(dist)\n\n\ndef choose_centroids(data_copy: pd.DataFrame):\n new_centroids = []\n for i in range(0, k):\n distance_scores = []\n if i != 0:\n for j in new_centroids:\n distances = []\n for row in data_copy.iterrows():\n distances.append((euclidean_dist(j, row[1]), row[0]))\n distances.sort()\n distance_scores.append(distances[-1])\n distance_scores.sort()\n centroid_index = distance_scores[-1][1]\n else:\n centroid_index = random.randrange(num_rows)\n data_copy.drop(labels=centroid_index, axis=0, inplace=True)\n new_centroids.append(data.iloc[centroid_index])\n return new_centroids\n\n\ndef assign_centroids():\n cluster_ids = []\n cluster_dict = {}\n counter = 0\n for i in centroids:\n if i.name is None:\n i.name = counter\n cluster_dict[i.name] = counter\n counter += 1\n for row in data.iterrows():\n distances = []\n for j in centroids:\n dist = euclidean_dist(row[1], j)\n if dist != 0:\n distances.append((dist, j.name))\n distances.sort()\n cluster_ids.append(cluster_dict[distances[0][1]])\n try:\n data.insert(6, 'ClusterID', cluster_ids)\n except ValueError:\n data.drop(columns='ClusterID', axis=1, inplace=True)\n data.insert(6, 'ClusterID', cluster_ids)\n except IndexError:\n data.drop(columns='ClusterID', axis=1, inplace=True)\n data.insert(6, 'ClusterID', cluster_ids)\n return cluster_ids\n\n\ndef recalculate_clusters():\n for i in range(0, k):\n cluster = pd.DataFrame()\n for item in data.iterrows():\n if item[1].loc['ClusterID'] == i:\n cluster = cluster.append(other=item[1])\n centroids[i] = cluster.mean()\n\n\ndata = pd.read_csv('data/fire_data_2011.csv')\ncounter = 0\ntree_count_dict = {}\nfor i in data.iterrows():\n try:\n tree_count_dict[i[1]['tree_genus']]\n except KeyError:\n tree_count_dict[i[1]['tree_genus']] = counter\n counter += 1\ndata = data.copy().replace(to_replace=tree_count_dict)\nprint(data)\nk = 7\nnum_rows = data.iloc[-1].name\ncentroids = choose_centroids(data.copy())\ncluster_assignments = []\nunchanged_iteration_count = 0\nfor iterations in range(0, 100):\n print('Clustering Progress: [', iterations + 1, '/ 100 ]')\n previous_assignments = cluster_assignments.copy()\n cluster_assignments = assign_centroids()\n recalculate_clusters()\n if previous_assignments == cluster_assignments and len(previous_assignments\n ) > 0:\n unchanged_iteration_count += 1\n else:\n unchanged_iteration_count = 0\n if unchanged_iteration_count > 3:\n print(\n \"Exiting early: cluster assignments haven't changed in 3 iterations\"\n )\n break\nprint(\"\"\"\nCluster Counts ( k =\"\"\", k, '):')\nfor i in range(0, k):\n print('Cluster', i + 1, ': ', cluster_assignments.count(i))\nprint('\\n\\n', data)\ndata.to_csv('./data/fire_data_2011_clustered.csv')\n",
"step-5": "import pandas as pd\nimport random\nimport math\n\n\n# takes 2 row series and calculates the distances between them\ndef euclidean_dist(a: pd.Series, b: pd.Series):\n diff = a.sub(other=b)\n squares = diff ** 2\n dist = 0\n\n for feature_distance in squares:\n if not math.isnan(feature_distance):\n dist += feature_distance\n\n return math.sqrt(dist)\n\n\n# takes copy of dataframe; returns initialized centroid array\ndef choose_centroids(data_copy: pd.DataFrame):\n new_centroids = []\n\n # randomly picks k centroids\n for i in range(0, k):\n distance_scores = []\n\n # picks furthest centroid from each other if the first one has been picked; else picks a random initial point\n if i != 0:\n for j in new_centroids:\n distances = []\n\n # for j existing centroids, compare to all other points and selects from all of j for next centroid\n for row in data_copy.iterrows():\n distances.append((euclidean_dist(j, row[1]), row[0]))\n\n distances.sort()\n distance_scores.append(distances[-1])\n\n distance_scores.sort()\n centroid_index = distance_scores[-1][1]\n\n else:\n centroid_index = random.randrange(num_rows)\n\n # drops centroid from copied dataframe to avoid duplicates\n data_copy.drop(labels=centroid_index, axis=0, inplace=True)\n\n # appends the newly selected centroid to the list\n new_centroids.append(data.iloc[centroid_index])\n\n return new_centroids\n\n\ndef assign_centroids():\n cluster_ids = [] # array for storing column output\n cluster_dict = {} # dict for mapping centroid IDs (i.e. 89, 102, 34, etc.) to (0, 1, 2, ..., k)\n counter = 0\n\n for i in centroids:\n if i.name is None:\n i.name = counter\n cluster_dict[i.name] = counter\n counter += 1 # crude way of assigning centroid IDs\n\n for row in data.iterrows():\n distances = []\n\n for j in centroids:\n dist = euclidean_dist(row[1], j)\n if dist != 0:\n distances.append((dist, j.name))\n\n distances.sort()\n cluster_ids.append(cluster_dict[distances[0][1]])\n\n # inserts cluster assignment column;\n # if column already exists, catches exception and removes the column before insertion\n try:\n data.insert(6, \"ClusterID\", cluster_ids)\n except ValueError:\n data.drop(columns=\"ClusterID\", axis=1, inplace=True)\n data.insert(6, \"ClusterID\", cluster_ids)\n except IndexError:\n data.drop(columns=\"ClusterID\", axis=1, inplace=True)\n data.insert(6, \"ClusterID\", cluster_ids)\n return cluster_ids\n\n\ndef recalculate_clusters():\n # for k centroids, take the mean of all values belonging to the centroid and make that point the new centroid\n for i in range(0, k):\n cluster = pd.DataFrame()\n for item in data.iterrows():\n if item[1].loc['ClusterID'] == i:\n cluster = cluster.append(other=item[1])\n centroids[i] = cluster.mean()\n\n\ndata = pd.read_csv(\"data/fire_data_2011.csv\")\n\n# uses a dict to convert from tree genus i.e. \"Pinu\", \"Pice\",... 
to 0, 1,...\ncounter = 0\ntree_count_dict = {}\nfor i in data.iterrows():\n try:\n tree_count_dict[i[1][\"tree_genus\"]]\n except KeyError:\n tree_count_dict[i[1][\"tree_genus\"]] = counter\n counter += 1\n\ndata = data.copy().replace(to_replace=tree_count_dict)\nprint(data)\n\nk = 7\nnum_rows = data.iloc[-1].name # gets label of the last row to figure out how many instances are in the data\n\n# giving temporary copy of data so selected values can be removed so there aren't duplicate centroids\ncentroids = choose_centroids(data.copy())\n\ncluster_assignments = []\nunchanged_iteration_count = 0\n\nfor iterations in range(0, 100):\n print(\"Clustering Progress: [\", iterations + 1, \"/ 100 ]\")\n\n # update previous cluster assignments; reassign cluster IDs and recalculate centroids\n previous_assignments = cluster_assignments.copy()\n cluster_assignments = assign_centroids()\n recalculate_clusters()\n\n # checks if cluster assignments have changed from one iteration to another\n if previous_assignments == cluster_assignments and len(previous_assignments) > 0:\n unchanged_iteration_count += 1\n else:\n unchanged_iteration_count = 0\n\n # if cluster assignments haven't changed in 3 iterations, break from loop and exit\n if unchanged_iteration_count > 3:\n print(\"Exiting early: cluster assignments haven't changed in 3 iterations\")\n break\n\nprint(\"\\nCluster Counts ( k =\", k, \"):\")\nfor i in range(0, k):\n print(\"Cluster\", i + 1, \": \", cluster_assignments.count(i))\n\nprint(\"\\n\\n\", data)\n\ndata.to_csv(\"./data/fire_data_2011_clustered.csv\")\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import tensorflow as tf
def makeMnistModel():
mnist = tf.keras.datasets.mnist
(X_train, y_train), (_, _) = mnist.load_data()
X_train = X_train / 255.0
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(input_shape
=(28, 28)), tf.keras.layers.Dense(128, activation='relu'), tf.keras
.layers.Dropout(0.2), tf.keras.layers.Dense(10, activation='softmax')])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(X_train, y_train, epochs=5)
model.save('./mnist_model.h5')
makeMnistModel()
|
normal
|
{
"blob_id": "1555583cd3d8938cbaeeac2d1f74bb9c3858f26d",
"index": 4207,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef makeMnistModel():\n mnist = tf.keras.datasets.mnist\n (X_train, y_train), (_, _) = mnist.load_data()\n X_train = X_train / 255.0\n model = tf.keras.models.Sequential([tf.keras.layers.Flatten(input_shape\n =(28, 28)), tf.keras.layers.Dense(128, activation='relu'), tf.keras\n .layers.Dropout(0.2), tf.keras.layers.Dense(10, activation='softmax')])\n model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n model.fit(X_train, y_train, epochs=5)\n model.save('./mnist_model.h5')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef makeMnistModel():\n mnist = tf.keras.datasets.mnist\n (X_train, y_train), (_, _) = mnist.load_data()\n X_train = X_train / 255.0\n model = tf.keras.models.Sequential([tf.keras.layers.Flatten(input_shape\n =(28, 28)), tf.keras.layers.Dense(128, activation='relu'), tf.keras\n .layers.Dropout(0.2), tf.keras.layers.Dense(10, activation='softmax')])\n model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n model.fit(X_train, y_train, epochs=5)\n model.save('./mnist_model.h5')\n\n\nmakeMnistModel()\n",
"step-4": "import tensorflow as tf\n\n\ndef makeMnistModel():\n mnist = tf.keras.datasets.mnist\n (X_train, y_train), (_, _) = mnist.load_data()\n X_train = X_train / 255.0\n model = tf.keras.models.Sequential([tf.keras.layers.Flatten(input_shape\n =(28, 28)), tf.keras.layers.Dense(128, activation='relu'), tf.keras\n .layers.Dropout(0.2), tf.keras.layers.Dense(10, activation='softmax')])\n model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n model.fit(X_train, y_train, epochs=5)\n model.save('./mnist_model.h5')\n\n\nmakeMnistModel()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class SetupGuideLandingPageTests(WagtailPageTests):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class SetupGuidePageTests(WagtailPageTests):
def test_can_create_under_landing_page(self):
self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SetupGuideLandingPageTests(WagtailPageTests):
def test_can_create_under_homepage(self):
self.assertCanCreateAt(HomePage, SetupGuideLandingPage)
<|reserved_special_token_0|>
class SetupGuidePageTests(WagtailPageTests):
def test_can_create_under_landing_page(self):
self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SetupGuideLandingPageTests(WagtailPageTests):
def test_can_create_under_homepage(self):
self.assertCanCreateAt(HomePage, SetupGuideLandingPage)
def test_setup_guide_page_subpages(self):
self.assertAllowedSubpageTypes(SetupGuideLandingPage, {SetupGuidePage})
class SetupGuidePageTests(WagtailPageTests):
def test_can_create_under_landing_page(self):
self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)
<|reserved_special_token_1|>
from wagtail.tests.utils import WagtailPageTests
from setup_guide.models import SetupGuideLandingPage, SetupGuidePage
from home.models import HomePage
class SetupGuideLandingPageTests(WagtailPageTests):
def test_can_create_under_homepage(self):
self.assertCanCreateAt(HomePage, SetupGuideLandingPage)
def test_setup_guide_page_subpages(self):
self.assertAllowedSubpageTypes(SetupGuideLandingPage, {SetupGuidePage})
class SetupGuidePageTests(WagtailPageTests):
def test_can_create_under_landing_page(self):
self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)
<|reserved_special_token_1|>
from wagtail.tests.utils import WagtailPageTests
from setup_guide.models import SetupGuideLandingPage, SetupGuidePage
from home.models import HomePage
class SetupGuideLandingPageTests(WagtailPageTests):
def test_can_create_under_homepage(self):
self.assertCanCreateAt(HomePage, SetupGuideLandingPage)
def test_setup_guide_page_subpages(self):
# A SetupGuidePage can only have other SetupGuidePage children
self.assertAllowedSubpageTypes(
SetupGuideLandingPage, {SetupGuidePage})
class SetupGuidePageTests(WagtailPageTests):
def test_can_create_under_landing_page(self):
self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)
|
flexible
|
{
"blob_id": "5fdcbccb99880da79eb0efbdecd328ca1cf73d7f",
"index": 1415,
"step-1": "<mask token>\n\n\nclass SetupGuideLandingPageTests(WagtailPageTests):\n <mask token>\n <mask token>\n\n\nclass SetupGuidePageTests(WagtailPageTests):\n\n def test_can_create_under_landing_page(self):\n self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)\n",
"step-2": "<mask token>\n\n\nclass SetupGuideLandingPageTests(WagtailPageTests):\n\n def test_can_create_under_homepage(self):\n self.assertCanCreateAt(HomePage, SetupGuideLandingPage)\n <mask token>\n\n\nclass SetupGuidePageTests(WagtailPageTests):\n\n def test_can_create_under_landing_page(self):\n self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)\n",
"step-3": "<mask token>\n\n\nclass SetupGuideLandingPageTests(WagtailPageTests):\n\n def test_can_create_under_homepage(self):\n self.assertCanCreateAt(HomePage, SetupGuideLandingPage)\n\n def test_setup_guide_page_subpages(self):\n self.assertAllowedSubpageTypes(SetupGuideLandingPage, {SetupGuidePage})\n\n\nclass SetupGuidePageTests(WagtailPageTests):\n\n def test_can_create_under_landing_page(self):\n self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)\n",
"step-4": "from wagtail.tests.utils import WagtailPageTests\nfrom setup_guide.models import SetupGuideLandingPage, SetupGuidePage\nfrom home.models import HomePage\n\n\nclass SetupGuideLandingPageTests(WagtailPageTests):\n\n def test_can_create_under_homepage(self):\n self.assertCanCreateAt(HomePage, SetupGuideLandingPage)\n\n def test_setup_guide_page_subpages(self):\n self.assertAllowedSubpageTypes(SetupGuideLandingPage, {SetupGuidePage})\n\n\nclass SetupGuidePageTests(WagtailPageTests):\n\n def test_can_create_under_landing_page(self):\n self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)\n",
"step-5": "from wagtail.tests.utils import WagtailPageTests\nfrom setup_guide.models import SetupGuideLandingPage, SetupGuidePage\nfrom home.models import HomePage\n\n\nclass SetupGuideLandingPageTests(WagtailPageTests):\n def test_can_create_under_homepage(self):\n self.assertCanCreateAt(HomePage, SetupGuideLandingPage)\n\n def test_setup_guide_page_subpages(self):\n # A SetupGuidePage can only have other SetupGuidePage children\n self.assertAllowedSubpageTypes(\n SetupGuideLandingPage, {SetupGuidePage})\n\n\nclass SetupGuidePageTests(WagtailPageTests):\n def test_can_create_under_landing_page(self):\n self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('admin_panel/', views.AdminPanel.as_view(), name=
'admin_panel'), path('admin_panel/connection/', views.Connection.
as_view(), name='connect_group-teacher'), path(
'admin_panel/connection/<str:choiced_departament>', views.Connection.
as_view(), name='connect_group-teacher')]
<|reserved_special_token_1|>
from django.urls import path
from admin_panel import views
urlpatterns = [path('admin_panel/', views.AdminPanel.as_view(), name=
'admin_panel'), path('admin_panel/connection/', views.Connection.
as_view(), name='connect_group-teacher'), path(
'admin_panel/connection/<str:choiced_departament>', views.Connection.
as_view(), name='connect_group-teacher')]
|
flexible
|
{
"blob_id": "34a7fd66a9e2eae25994336f22a76c24c11a6e1b",
"index": 7408,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('admin_panel/', views.AdminPanel.as_view(), name=\n 'admin_panel'), path('admin_panel/connection/', views.Connection.\n as_view(), name='connect_group-teacher'), path(\n 'admin_panel/connection/<str:choiced_departament>', views.Connection.\n as_view(), name='connect_group-teacher')]\n",
"step-3": "from django.urls import path\nfrom admin_panel import views\nurlpatterns = [path('admin_panel/', views.AdminPanel.as_view(), name=\n 'admin_panel'), path('admin_panel/connection/', views.Connection.\n as_view(), name='connect_group-teacher'), path(\n 'admin_panel/connection/<str:choiced_departament>', views.Connection.\n as_view(), name='connect_group-teacher')]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def rebin(*args, **kwargs):
"""
Rebin the map
"""
if len(args) > 0 and type(args[0]) == 'str' or 'input_filename' in kwargs:
func = mapfile_rebin
else:
func = array_rebin
return func(*args, **kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def array_rebin(data, shape):
"""
Rebin a multidimensional array
Args:
data (array): The input array
shape (tuple): The new shape
"""
assert data.ndim == len(shape)
assert data.shape[0] % shape[0] == 0
assert data.shape[1] % shape[1] == 0
assert data.shape[2] % shape[2] == 0
factors = numpy.array([(d, c // d) for d, c in zip(shape, data.shape)])
data = data.reshape(factors.flatten())
for i in range(len(shape)):
data = data.sum(-1 * (i + 1))
return data
def mapfile_rebin(input_filename, output_filename, shape=None):
"""
Rebin the map
Args:
input_filename (str): The input map filename
output_filename (str): The output map filename
shape (tuple): The new shape of the map
"""
infile = read(input_filename)
data = infile.data
logger.info('Resampling map from shape %s to %s' % (data.shape, tuple(
shape)))
data = array_rebin(data, shape)
outfile = write(output_filename, data, infile=infile)
outfile.voxel_size = outfile.voxel_size['z'] * data.shape[0] // shape[0
], outfile.voxel_size['y'] * data.shape[1] // shape[1
], outfile.voxel_size['x'] * data.shape[2] // shape[2]
def rebin(*args, **kwargs):
"""
Rebin the map
"""
if len(args) > 0 and type(args[0]) == 'str' or 'input_filename' in kwargs:
func = mapfile_rebin
else:
func = array_rebin
return func(*args, **kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = logging.getLogger(__name__)
def array_rebin(data, shape):
"""
Rebin a multidimensional array
Args:
data (array): The input array
shape (tuple): The new shape
"""
assert data.ndim == len(shape)
assert data.shape[0] % shape[0] == 0
assert data.shape[1] % shape[1] == 0
assert data.shape[2] % shape[2] == 0
factors = numpy.array([(d, c // d) for d, c in zip(shape, data.shape)])
data = data.reshape(factors.flatten())
for i in range(len(shape)):
data = data.sum(-1 * (i + 1))
return data
def mapfile_rebin(input_filename, output_filename, shape=None):
"""
Rebin the map
Args:
input_filename (str): The input map filename
output_filename (str): The output map filename
shape (tuple): The new shape of the map
"""
infile = read(input_filename)
data = infile.data
logger.info('Resampling map from shape %s to %s' % (data.shape, tuple(
shape)))
data = array_rebin(data, shape)
outfile = write(output_filename, data, infile=infile)
outfile.voxel_size = outfile.voxel_size['z'] * data.shape[0] // shape[0
], outfile.voxel_size['y'] * data.shape[1] // shape[1
], outfile.voxel_size['x'] * data.shape[2] // shape[2]
def rebin(*args, **kwargs):
"""
Rebin the map
"""
if len(args) > 0 and type(args[0]) == 'str' or 'input_filename' in kwargs:
func = mapfile_rebin
else:
func = array_rebin
return func(*args, **kwargs)
<|reserved_special_token_1|>
import logging
import numpy
from maptools.util import read, write
logger = logging.getLogger(__name__)
def array_rebin(data, shape):
"""
Rebin a multidimensional array
Args:
data (array): The input array
shape (tuple): The new shape
"""
assert data.ndim == len(shape)
assert data.shape[0] % shape[0] == 0
assert data.shape[1] % shape[1] == 0
assert data.shape[2] % shape[2] == 0
factors = numpy.array([(d, c // d) for d, c in zip(shape, data.shape)])
data = data.reshape(factors.flatten())
for i in range(len(shape)):
data = data.sum(-1 * (i + 1))
return data
def mapfile_rebin(input_filename, output_filename, shape=None):
"""
Rebin the map
Args:
input_filename (str): The input map filename
output_filename (str): The output map filename
shape (tuple): The new shape of the map
"""
infile = read(input_filename)
data = infile.data
logger.info('Resampling map from shape %s to %s' % (data.shape, tuple(
shape)))
data = array_rebin(data, shape)
outfile = write(output_filename, data, infile=infile)
outfile.voxel_size = outfile.voxel_size['z'] * data.shape[0] // shape[0
], outfile.voxel_size['y'] * data.shape[1] // shape[1
], outfile.voxel_size['x'] * data.shape[2] // shape[2]
def rebin(*args, **kwargs):
"""
Rebin the map
"""
    if (len(args) > 0 and isinstance(args[0], str)) or 'input_filename' in kwargs:
func = mapfile_rebin
else:
func = array_rebin
return func(*args, **kwargs)
<|reserved_special_token_1|>
#
# Copyright (C) 2020 RFI
#
# Author: James Parkhurst
#
# This code is distributed under the GPLv3 license, a copy of
# which is included in the root directory of this package.
#
import logging
import numpy
from maptools.util import read, write
# Get the logger
logger = logging.getLogger(__name__)
def array_rebin(data, shape):
"""
Rebin a multidimensional array
Args:
data (array): The input array
shape (tuple): The new shape
"""
# Ensure dimensions are consistent
assert data.ndim == len(shape)
assert data.shape[0] % shape[0] == 0
assert data.shape[1] % shape[1] == 0
assert data.shape[2] % shape[2] == 0
# Get pairs of (shape, bin factor) for each dimension
factors = numpy.array([(d, c // d) for d, c in zip(shape, data.shape)])
# Rebin the array
data = data.reshape(factors.flatten())
for i in range(len(shape)):
data = data.sum(-1 * (i + 1))
return data
def mapfile_rebin(input_filename, output_filename, shape=None):
"""
Rebin the map
Args:
input_filename (str): The input map filename
output_filename (str): The output map filename
shape (tuple): The new shape of the map
"""
# Open the input file
infile = read(input_filename)
# Get the data
data = infile.data
# Get the subset of data
logger.info("Resampling map from shape %s to %s" % (data.shape, tuple(shape)))
data = array_rebin(data, shape)
# Write the output file
outfile = write(output_filename, data, infile=infile)
# Update the voxel size
outfile.voxel_size = (
outfile.voxel_size["z"] * data.shape[0] // shape[0],
outfile.voxel_size["y"] * data.shape[1] // shape[1],
outfile.voxel_size["x"] * data.shape[2] // shape[2],
)
def rebin(*args, **kwargs):
"""
Rebin the map
"""
    if (len(args) > 0 and isinstance(args[0], str)) or "input_filename" in kwargs:
func = mapfile_rebin
else:
func = array_rebin
return func(*args, **kwargs)
|
flexible
|
{
"blob_id": "18dc01f3e1672407800e53d80a85ffc8d5b86c17",
"index": 7497,
"step-1": "<mask token>\n\n\ndef rebin(*args, **kwargs):\n \"\"\"\n Rebin the map\n\n \"\"\"\n if len(args) > 0 and type(args[0]) == 'str' or 'input_filename' in kwargs:\n func = mapfile_rebin\n else:\n func = array_rebin\n return func(*args, **kwargs)\n",
"step-2": "<mask token>\n\n\ndef array_rebin(data, shape):\n \"\"\"\n Rebin a multidimensional array\n\n Args:\n data (array): The input array\n shape (tuple): The new shape\n\n \"\"\"\n assert data.ndim == len(shape)\n assert data.shape[0] % shape[0] == 0\n assert data.shape[1] % shape[1] == 0\n assert data.shape[2] % shape[2] == 0\n factors = numpy.array([(d, c // d) for d, c in zip(shape, data.shape)])\n data = data.reshape(factors.flatten())\n for i in range(len(shape)):\n data = data.sum(-1 * (i + 1))\n return data\n\n\ndef mapfile_rebin(input_filename, output_filename, shape=None):\n \"\"\"\n Rebin the map\n\n Args:\n input_filename (str): The input map filename\n output_filename (str): The output map filename\n shape (tuple): The new shape of the map\n\n \"\"\"\n infile = read(input_filename)\n data = infile.data\n logger.info('Resampling map from shape %s to %s' % (data.shape, tuple(\n shape)))\n data = array_rebin(data, shape)\n outfile = write(output_filename, data, infile=infile)\n outfile.voxel_size = outfile.voxel_size['z'] * data.shape[0] // shape[0\n ], outfile.voxel_size['y'] * data.shape[1] // shape[1\n ], outfile.voxel_size['x'] * data.shape[2] // shape[2]\n\n\ndef rebin(*args, **kwargs):\n \"\"\"\n Rebin the map\n\n \"\"\"\n if len(args) > 0 and type(args[0]) == 'str' or 'input_filename' in kwargs:\n func = mapfile_rebin\n else:\n func = array_rebin\n return func(*args, **kwargs)\n",
"step-3": "<mask token>\nlogger = logging.getLogger(__name__)\n\n\ndef array_rebin(data, shape):\n \"\"\"\n Rebin a multidimensional array\n\n Args:\n data (array): The input array\n shape (tuple): The new shape\n\n \"\"\"\n assert data.ndim == len(shape)\n assert data.shape[0] % shape[0] == 0\n assert data.shape[1] % shape[1] == 0\n assert data.shape[2] % shape[2] == 0\n factors = numpy.array([(d, c // d) for d, c in zip(shape, data.shape)])\n data = data.reshape(factors.flatten())\n for i in range(len(shape)):\n data = data.sum(-1 * (i + 1))\n return data\n\n\ndef mapfile_rebin(input_filename, output_filename, shape=None):\n \"\"\"\n Rebin the map\n\n Args:\n input_filename (str): The input map filename\n output_filename (str): The output map filename\n shape (tuple): The new shape of the map\n\n \"\"\"\n infile = read(input_filename)\n data = infile.data\n logger.info('Resampling map from shape %s to %s' % (data.shape, tuple(\n shape)))\n data = array_rebin(data, shape)\n outfile = write(output_filename, data, infile=infile)\n outfile.voxel_size = outfile.voxel_size['z'] * data.shape[0] // shape[0\n ], outfile.voxel_size['y'] * data.shape[1] // shape[1\n ], outfile.voxel_size['x'] * data.shape[2] // shape[2]\n\n\ndef rebin(*args, **kwargs):\n \"\"\"\n Rebin the map\n\n \"\"\"\n if len(args) > 0 and type(args[0]) == 'str' or 'input_filename' in kwargs:\n func = mapfile_rebin\n else:\n func = array_rebin\n return func(*args, **kwargs)\n",
"step-4": "import logging\nimport numpy\nfrom maptools.util import read, write\nlogger = logging.getLogger(__name__)\n\n\ndef array_rebin(data, shape):\n \"\"\"\n Rebin a multidimensional array\n\n Args:\n data (array): The input array\n shape (tuple): The new shape\n\n \"\"\"\n assert data.ndim == len(shape)\n assert data.shape[0] % shape[0] == 0\n assert data.shape[1] % shape[1] == 0\n assert data.shape[2] % shape[2] == 0\n factors = numpy.array([(d, c // d) for d, c in zip(shape, data.shape)])\n data = data.reshape(factors.flatten())\n for i in range(len(shape)):\n data = data.sum(-1 * (i + 1))\n return data\n\n\ndef mapfile_rebin(input_filename, output_filename, shape=None):\n \"\"\"\n Rebin the map\n\n Args:\n input_filename (str): The input map filename\n output_filename (str): The output map filename\n shape (tuple): The new shape of the map\n\n \"\"\"\n infile = read(input_filename)\n data = infile.data\n logger.info('Resampling map from shape %s to %s' % (data.shape, tuple(\n shape)))\n data = array_rebin(data, shape)\n outfile = write(output_filename, data, infile=infile)\n outfile.voxel_size = outfile.voxel_size['z'] * data.shape[0] // shape[0\n ], outfile.voxel_size['y'] * data.shape[1] // shape[1\n ], outfile.voxel_size['x'] * data.shape[2] // shape[2]\n\n\ndef rebin(*args, **kwargs):\n \"\"\"\n Rebin the map\n\n \"\"\"\n if len(args) > 0 and type(args[0]) == 'str' or 'input_filename' in kwargs:\n func = mapfile_rebin\n else:\n func = array_rebin\n return func(*args, **kwargs)\n",
"step-5": "#\n# Copyright (C) 2020 RFI\n#\n# Author: James Parkhurst\n#\n# This code is distributed under the GPLv3 license, a copy of\n# which is included in the root directory of this package.\n#\nimport logging\nimport numpy\nfrom maptools.util import read, write\n\n\n# Get the logger\nlogger = logging.getLogger(__name__)\n\n\ndef array_rebin(data, shape):\n \"\"\"\n Rebin a multidimensional array\n\n Args:\n data (array): The input array\n shape (tuple): The new shape\n\n \"\"\"\n\n # Ensure dimensions are consistent\n assert data.ndim == len(shape)\n assert data.shape[0] % shape[0] == 0\n assert data.shape[1] % shape[1] == 0\n assert data.shape[2] % shape[2] == 0\n\n # Get pairs of (shape, bin factor) for each dimension\n factors = numpy.array([(d, c // d) for d, c in zip(shape, data.shape)])\n\n # Rebin the array\n data = data.reshape(factors.flatten())\n for i in range(len(shape)):\n data = data.sum(-1 * (i + 1))\n return data\n\n\ndef mapfile_rebin(input_filename, output_filename, shape=None):\n \"\"\"\n Rebin the map\n\n Args:\n input_filename (str): The input map filename\n output_filename (str): The output map filename\n shape (tuple): The new shape of the map\n\n \"\"\"\n\n # Open the input file\n infile = read(input_filename)\n\n # Get the data\n data = infile.data\n\n # Get the subset of data\n logger.info(\"Resampling map from shape %s to %s\" % (data.shape, tuple(shape)))\n data = array_rebin(data, shape)\n\n # Write the output file\n outfile = write(output_filename, data, infile=infile)\n\n # Update the voxel size\n outfile.voxel_size = (\n outfile.voxel_size[\"z\"] * data.shape[0] // shape[0],\n outfile.voxel_size[\"y\"] * data.shape[1] // shape[1],\n outfile.voxel_size[\"x\"] * data.shape[2] // shape[2],\n )\n\n\ndef rebin(*args, **kwargs):\n \"\"\"\n Rebin the map\n\n \"\"\"\n if len(args) > 0 and type(args[0]) == \"str\" or \"input_filename\" in kwargs:\n func = mapfile_rebin\n else:\n func = array_rebin\n return func(*args, **kwargs)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
smodelsOutput = {'OutputStatus': {'sigmacut': 0.01, 'minmassgap': 5.0,
'maxcond': 0.2, 'ncpus': 1, 'file status': 1, 'decomposition status': 1,
'warnings': 'Input file ok', 'input file':
'inputFiles/scanExample/slha/100968509.slha', 'database version':
'1.2.0', 'smodels version': '1.2.0rc'}, 'ExptRes': [{'maxcond': 0.0,
'theory prediction (fb)': 728.7491431153657, 'upper limit (fb)':
44.22312638711652, 'expected upper limit (fb)': None, 'TxNames': ['T2'],
'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID':
'CMS-SUS-16-033', 'DataSetID': None, 'AnalysisSqrts (TeV)': 13.0,
'lumi (fb-1)': 35.9, 'dataType': 'upperLimit', 'r': 16.478915053090216,
'r_expected': None}, {'maxcond': 0.0, 'theory prediction (fb)':
728.7491431153657, 'upper limit (fb)': 55.74859999999999,
'expected upper limit (fb)': None, 'TxNames': ['T2'], 'Mass (GeV)': [[
541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'CMS-SUS-16-036',
'DataSetID': None, 'AnalysisSqrts (TeV)': 13.0, 'lumi (fb-1)': 35.9,
'dataType': 'upperLimit', 'r': 13.072061775817971, 'r_expected': None},
{'maxcond': 0.0, 'theory prediction (fb)': 132.83976207255284,
'upper limit (fb)': 36.140272, 'expected upper limit (fb)': None,
'TxNames': ['T2'], 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]],
'AnalysisID': 'CMS-SUS-13-019', 'DataSetID': None,
'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 19.5, 'dataType':
'upperLimit', 'r': 3.675671341725177, 'r_expected': None}, {'maxcond':
0.0, 'theory prediction (fb)': 0.9562482176560967, 'upper limit (fb)':
0.274, 'expected upper limit (fb)': 0.154, 'TxNames': ['T2', 'T5',
'TChiZZ'], 'Mass (GeV)': None, 'AnalysisID': 'CMS-SUS-13-012',
'DataSetID': '6NJet8_1250HT1500_450MHTinf', 'AnalysisSqrts (TeV)': 8.0,
'lumi (fb-1)': 19.5, 'dataType': 'efficiencyMap', 'r':
3.489956998744878, 'r_expected': 6.209404010753875, 'chi2':
13.063642260056689, 'likelihood': 6.008581252238334e-05}, {'maxcond':
0.0, 'theory prediction (fb)': 132.83976207255284, 'upper limit (fb)':
58.50226240000003, 'expected upper limit (fb)': None, 'TxNames': ['T2'],
'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID':
'ATLAS-SUSY-2013-02', 'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0,
'lumi (fb-1)': 20.3, 'dataType': 'upperLimit', 'r': 2.270677348583237,
'r_expected': None}, {'maxcond': 0.0, 'theory prediction (fb)':
9.084517413967422, 'upper limit (fb)': 4.2419,
'expected upper limit (fb)': 5.5524, 'TxNames': ['T2'], 'Mass (GeV)': [
[541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'ATLAS-SUSY-2013-02',
'DataSetID': 'SR2jm', 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3,
'dataType': 'efficiencyMap', 'r': 2.141615175739037, 'r_expected':
1.6361424634333661, 'chi2': 11.844156696751806, 'likelihood':
3.1390377843658383e-07}, {'maxcond': 0.0, 'theory prediction (fb)':
132.83976207255284, 'upper limit (fb)': 67.69032800000002,
'expected upper limit (fb)': 67.79354400000003, 'TxNames': ['T2'],
'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID':
'CMS-SUS-12-028', 'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0,
'lumi (fb-1)': 11.7, 'dataType': 'upperLimit', 'r': 1.9624629691933657,
'r_expected': 1.9594751097914693}, {'maxcond': 0.0,
'theory prediction (fb)': 0.7285976790027092, 'upper limit (fb)': 0.506,
'expected upper limit (fb)': 0.464, 'TxNames': ['T5'], 'Mass (GeV)': [[
881.8, 541.4, 57.4], [881.8, 541.4, 57.4]], 'AnalysisID':
'ATLAS-SUSY-2013-04', 'DataSetID': 'GtGrid_SR_7ej80_0bjet',
'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType':
'efficiencyMap', 'r': 1.4399163616654331, 'r_expected':
1.5702536185403213, 'chi2': 7.225026655774327, 'likelihood':
0.0005573265805884188}, {'maxcond': 0.0, 'theory prediction (fb)':
132.83976207255284, 'upper limit (fb)': 97.78847200000001,
'expected upper limit (fb)': 69.450736, 'TxNames': ['T2'], 'Mass (GeV)':
[[541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'CMS-SUS-13-012',
'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 19.5,
'dataType': 'upperLimit', 'r': 1.358439899465377, 'r_expected':
1.9127192845379328}, {'maxcond': 0.0, 'theory prediction (fb)':
4.245413557698921, 'upper limit (fb)': 4.0, 'expected upper limit (fb)':
4.16, 'TxNames': ['T2'], 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]],
'AnalysisID': 'ATLAS-CONF-2013-047', 'DataSetID': 'C Medium',
'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType':
'efficiencyMap', 'r': 1.0613533894247302, 'r_expected':
1.0205321052160867, 'chi2': 2.344696287811548, 'likelihood':
8.123400145704854e-05}, {'maxcond': 0.0, 'theory prediction (fb)':
284.6597475, 'upper limit (fb)': 1041.0116, 'expected upper limit (fb)':
None, 'TxNames': ['TChiWZ'], 'Mass (GeV)': [[163.6, 57.4], [165.0, 57.4
]], 'AnalysisID': 'ATLAS-SUSY-2013-12', 'DataSetID': None,
'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType':
'upperLimit', 'r': 0.2734453175161545, 'r_expected': None}, {'maxcond':
0.0, 'theory prediction (fb)': 169.351124, 'upper limit (fb)': 1582.346,
'expected upper limit (fb)': None, 'TxNames': ['TChiWW'], 'Mass (GeV)':
[[163.6, 57.4], [163.6, 57.4]], 'AnalysisID': 'ATLAS-SUSY-2013-11',
'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3,
'dataType': 'upperLimit', 'r': 0.10702534338254717, 'r_expected': None},
{'maxcond': 0.0, 'theory prediction (fb)': 0.10289469462216802,
'upper limit (fb)': 1.07, 'expected upper limit (fb)': 1.17, 'TxNames':
['TChiWW'], 'Mass (GeV)': [[163.6, 57.4], [163.6, 57.4]], 'AnalysisID':
'ATLAS-SUSY-2013-11', 'DataSetID': 'WWa-DF', 'AnalysisSqrts (TeV)': 8.0,
'lumi (fb-1)': 20.3, 'dataType': 'efficiencyMap', 'r':
0.09616326600202618, 'r_expected': 0.08794418343775044, 'chi2':
0.23492769120756485, 'likelihood': 0.0021296922629215516}, {'maxcond':
0.0, 'theory prediction (fb)': 0.09049519199332233, 'upper limit (fb)':
0.97, 'expected upper limit (fb)': 0.762, 'TxNames': ['T2'],
'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID':
'ATLAS-CONF-2013-054', 'DataSetID': '8j50 flavor 0 b-jets',
'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType':
'efficiencyMap', 'r': 0.09329401236424983, 'r_expected':
0.11876009447942563, 'chi2': 0.13085006931201093, 'likelihood':
0.005704888785414326}, {'maxcond': 0.0, 'theory prediction (fb)':
602.7377329999999, 'upper limit (fb)': 17857.06,
'expected upper limit (fb)': None, 'TxNames': ['TChiWZ'], 'Mass (GeV)':
[[163.6, 57.4], [165.0, 57.4]], 'AnalysisID': 'CMS-SUS-16-034',
'DataSetID': None, 'AnalysisSqrts (TeV)': 13.0, 'lumi (fb-1)': 35.9,
'dataType': 'upperLimit', 'r': 0.033753469664099235, 'r_expected': None
}], 'Total xsec considered (fb)': 5455.932556090008,
'Missed Topologies': [{'sqrts (TeV)': 13.0, 'weight (fb)':
1525.2339345595758, 'element': "[[[jet]],[[jet],[jet]]] ('MET', 'MET')"
}, {'sqrts (TeV)': 13.0, 'weight (fb)': 164.5650363, 'element':
"[[],[[W]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)':
131.21450642075922, 'element':
"[[[jet],[Z]],[[jet],[jet]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0,
'weight (fb)': 131.09407599353733, 'element':
"[[[jet]],[[jet],[jet],[Z]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0,
'weight (fb)': 125.30880443708375, 'element':
"[[[jet]],[[jet],[Z]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0,
'weight (fb)': 109.09980502038648, 'element':
"[[[jet],[jet]],[[jet],[jet],[Z]]] ('MET', 'MET')"}, {'sqrts (TeV)':
13.0, 'weight (fb)': 87.78855441, 'element':
"[[],[[Z]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)':
23.328775686902066, 'element': "[[],[[jet]]] ('MET', 'MET')"}, {
'sqrts (TeV)': 13.0, 'weight (fb)': 18.943846, 'element':
"[[],[]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)':
11.23256793951906, 'element':
"[[[jet],[Z]],[[jet],[jet],[Z]]] ('MET', 'MET')"}], 'Long Cascades': [{
'sqrts (TeV)': 13.0, 'weight (fb)': 142.32664393305637, 'mother PIDs':
[[1000021, 2000001], [1000021, 2000003]]}, {'sqrts (TeV)': 13.0,
'weight (fb)': 113.78856056272761, 'mother PIDs': [[1000021, 1000021]]},
{'sqrts (TeV)': 13.0, 'weight (fb)': 2.556908397604195, 'mother PIDs':
[[2000001, 2000002], [2000002, 2000003]]}, {'sqrts (TeV)': 13.0,
'weight (fb)': 1.658904680547042, 'mother PIDs': [[1000021, 2000002]]},
{'sqrts (TeV)': 13.0, 'weight (fb)': 1.5034517332026478, 'mother PIDs':
[[1000002, 1000021]]}, {'sqrts (TeV)': 13.0, 'weight (fb)':
0.73751489438902, 'mother PIDs': [[1000021, 1000022]]}, {'sqrts (TeV)':
13.0, 'weight (fb)': 0.514380675953777, 'mother PIDs': [[1000001,
2000001], [1000001, 2000003], [1000003, 2000001], [1000003, 2000003]]},
{'sqrts (TeV)': 13.0, 'weight (fb)': 0.22710347967142056, 'mother PIDs':
[[1000002, 2000001], [1000002, 2000003]]}], 'Asymmetric Branches': [{
'sqrts (TeV)': 13.0, 'weight (fb)': 1656.3887238722155, 'mother PIDs':
[[1000021, 2000001], [1000021, 2000003]]}, {'sqrts (TeV)': 13.0,
'weight (fb)': 164.5650363, 'mother PIDs': [[1000022, 1000024]]}, {
'sqrts (TeV)': 13.0, 'weight (fb)': 126.94317745006455, 'mother PIDs':
[[2000001, 2000001], [2000001, 2000003], [2000003, 2000003]]}, {
'sqrts (TeV)': 13.0, 'weight (fb)': 81.7049616, 'mother PIDs': [[
1000022, 1000023]]}, {'sqrts (TeV)': 13.0, 'weight (fb)':
25.33546877159406, 'mother PIDs': [[1000022, 2000001], [1000022,
2000003]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 8.580393075610981,
'mother PIDs': [[1000021, 1000022]]}, {'sqrts (TeV)': 13.0,
'weight (fb)': 6.08359281, 'mother PIDs': [[1000022, 1000025]]}, {
'sqrts (TeV)': 13.0, 'weight (fb)': 2.055186185956878, 'mother PIDs': [
[1000025, 2000001], [1000025, 2000003]]}, {'sqrts (TeV)': 13.0,
'weight (fb)': 0.5969685251910638, 'mother PIDs': [[1000023, 2000001],
[1000023, 2000003]]}, {'sqrts (TeV)': 13.0, 'weight (fb)':
0.42547403652557386, 'mother PIDs': [[1000021, 1000025]]}],
'Outside Grid': [{'sqrts (TeV)': 13.0, 'weight (fb)':
0.07215987170114271, 'element': "[[[jet]],[[jet]]] ('MET', 'MET')"}, {
'sqrts (TeV)': 13.0, 'weight (fb)': 0.021621502520314927, 'element':
"[[[l]],[[l]]] ('MET', 'MET')"}]}
|
normal
|
{
"blob_id": "94d303716eac7fa72370435fe7d4d1cdac0cdc48",
"index": 6151,
"step-1": "<mask token>\n",
"step-2": "smodelsOutput = {'OutputStatus': {'sigmacut': 0.01, 'minmassgap': 5.0,\n 'maxcond': 0.2, 'ncpus': 1, 'file status': 1, 'decomposition status': 1,\n 'warnings': 'Input file ok', 'input file':\n 'inputFiles/scanExample/slha/100968509.slha', 'database version':\n '1.2.0', 'smodels version': '1.2.0rc'}, 'ExptRes': [{'maxcond': 0.0,\n 'theory prediction (fb)': 728.7491431153657, 'upper limit (fb)': \n 44.22312638711652, 'expected upper limit (fb)': None, 'TxNames': ['T2'],\n 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID':\n 'CMS-SUS-16-033', 'DataSetID': None, 'AnalysisSqrts (TeV)': 13.0,\n 'lumi (fb-1)': 35.9, 'dataType': 'upperLimit', 'r': 16.478915053090216,\n 'r_expected': None}, {'maxcond': 0.0, 'theory prediction (fb)': \n 728.7491431153657, 'upper limit (fb)': 55.74859999999999,\n 'expected upper limit (fb)': None, 'TxNames': ['T2'], 'Mass (GeV)': [[\n 541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'CMS-SUS-16-036',\n 'DataSetID': None, 'AnalysisSqrts (TeV)': 13.0, 'lumi (fb-1)': 35.9,\n 'dataType': 'upperLimit', 'r': 13.072061775817971, 'r_expected': None},\n {'maxcond': 0.0, 'theory prediction (fb)': 132.83976207255284,\n 'upper limit (fb)': 36.140272, 'expected upper limit (fb)': None,\n 'TxNames': ['T2'], 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]],\n 'AnalysisID': 'CMS-SUS-13-019', 'DataSetID': None,\n 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 19.5, 'dataType':\n 'upperLimit', 'r': 3.675671341725177, 'r_expected': None}, {'maxcond': \n 0.0, 'theory prediction (fb)': 0.9562482176560967, 'upper limit (fb)': \n 0.274, 'expected upper limit (fb)': 0.154, 'TxNames': ['T2', 'T5',\n 'TChiZZ'], 'Mass (GeV)': None, 'AnalysisID': 'CMS-SUS-13-012',\n 'DataSetID': '6NJet8_1250HT1500_450MHTinf', 'AnalysisSqrts (TeV)': 8.0,\n 'lumi (fb-1)': 19.5, 'dataType': 'efficiencyMap', 'r': \n 3.489956998744878, 'r_expected': 6.209404010753875, 'chi2': \n 13.063642260056689, 'likelihood': 6.008581252238334e-05}, {'maxcond': \n 0.0, 'theory prediction (fb)': 132.83976207255284, 'upper limit (fb)': \n 58.50226240000003, 'expected upper limit (fb)': None, 'TxNames': ['T2'],\n 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID':\n 'ATLAS-SUSY-2013-02', 'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0,\n 'lumi (fb-1)': 20.3, 'dataType': 'upperLimit', 'r': 2.270677348583237,\n 'r_expected': None}, {'maxcond': 0.0, 'theory prediction (fb)': \n 9.084517413967422, 'upper limit (fb)': 4.2419,\n 'expected upper limit (fb)': 5.5524, 'TxNames': ['T2'], 'Mass (GeV)': [\n [541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'ATLAS-SUSY-2013-02',\n 'DataSetID': 'SR2jm', 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3,\n 'dataType': 'efficiencyMap', 'r': 2.141615175739037, 'r_expected': \n 1.6361424634333661, 'chi2': 11.844156696751806, 'likelihood': \n 3.1390377843658383e-07}, {'maxcond': 0.0, 'theory prediction (fb)': \n 132.83976207255284, 'upper limit (fb)': 67.69032800000002,\n 'expected upper limit (fb)': 67.79354400000003, 'TxNames': ['T2'],\n 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID':\n 'CMS-SUS-12-028', 'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0,\n 'lumi (fb-1)': 11.7, 'dataType': 'upperLimit', 'r': 1.9624629691933657,\n 'r_expected': 1.9594751097914693}, {'maxcond': 0.0,\n 'theory prediction (fb)': 0.7285976790027092, 'upper limit (fb)': 0.506,\n 'expected upper limit (fb)': 0.464, 'TxNames': ['T5'], 'Mass (GeV)': [[\n 881.8, 541.4, 57.4], [881.8, 541.4, 57.4]], 'AnalysisID':\n 'ATLAS-SUSY-2013-04', 'DataSetID': 'GtGrid_SR_7ej80_0bjet',\n 'AnalysisSqrts (TeV)': 8.0, 'lumi 
(fb-1)': 20.3, 'dataType':\n 'efficiencyMap', 'r': 1.4399163616654331, 'r_expected': \n 1.5702536185403213, 'chi2': 7.225026655774327, 'likelihood': \n 0.0005573265805884188}, {'maxcond': 0.0, 'theory prediction (fb)': \n 132.83976207255284, 'upper limit (fb)': 97.78847200000001,\n 'expected upper limit (fb)': 69.450736, 'TxNames': ['T2'], 'Mass (GeV)':\n [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'CMS-SUS-13-012',\n 'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 19.5,\n 'dataType': 'upperLimit', 'r': 1.358439899465377, 'r_expected': \n 1.9127192845379328}, {'maxcond': 0.0, 'theory prediction (fb)': \n 4.245413557698921, 'upper limit (fb)': 4.0, 'expected upper limit (fb)':\n 4.16, 'TxNames': ['T2'], 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]],\n 'AnalysisID': 'ATLAS-CONF-2013-047', 'DataSetID': 'C Medium',\n 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType':\n 'efficiencyMap', 'r': 1.0613533894247302, 'r_expected': \n 1.0205321052160867, 'chi2': 2.344696287811548, 'likelihood': \n 8.123400145704854e-05}, {'maxcond': 0.0, 'theory prediction (fb)': \n 284.6597475, 'upper limit (fb)': 1041.0116, 'expected upper limit (fb)':\n None, 'TxNames': ['TChiWZ'], 'Mass (GeV)': [[163.6, 57.4], [165.0, 57.4\n ]], 'AnalysisID': 'ATLAS-SUSY-2013-12', 'DataSetID': None,\n 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType':\n 'upperLimit', 'r': 0.2734453175161545, 'r_expected': None}, {'maxcond':\n 0.0, 'theory prediction (fb)': 169.351124, 'upper limit (fb)': 1582.346,\n 'expected upper limit (fb)': None, 'TxNames': ['TChiWW'], 'Mass (GeV)':\n [[163.6, 57.4], [163.6, 57.4]], 'AnalysisID': 'ATLAS-SUSY-2013-11',\n 'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3,\n 'dataType': 'upperLimit', 'r': 0.10702534338254717, 'r_expected': None},\n {'maxcond': 0.0, 'theory prediction (fb)': 0.10289469462216802,\n 'upper limit (fb)': 1.07, 'expected upper limit (fb)': 1.17, 'TxNames':\n ['TChiWW'], 'Mass (GeV)': [[163.6, 57.4], [163.6, 57.4]], 'AnalysisID':\n 'ATLAS-SUSY-2013-11', 'DataSetID': 'WWa-DF', 'AnalysisSqrts (TeV)': 8.0,\n 'lumi (fb-1)': 20.3, 'dataType': 'efficiencyMap', 'r': \n 0.09616326600202618, 'r_expected': 0.08794418343775044, 'chi2': \n 0.23492769120756485, 'likelihood': 0.0021296922629215516}, {'maxcond': \n 0.0, 'theory prediction (fb)': 0.09049519199332233, 'upper limit (fb)':\n 0.97, 'expected upper limit (fb)': 0.762, 'TxNames': ['T2'],\n 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID':\n 'ATLAS-CONF-2013-054', 'DataSetID': '8j50 flavor 0 b-jets',\n 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType':\n 'efficiencyMap', 'r': 0.09329401236424983, 'r_expected': \n 0.11876009447942563, 'chi2': 0.13085006931201093, 'likelihood': \n 0.005704888785414326}, {'maxcond': 0.0, 'theory prediction (fb)': \n 602.7377329999999, 'upper limit (fb)': 17857.06,\n 'expected upper limit (fb)': None, 'TxNames': ['TChiWZ'], 'Mass (GeV)':\n [[163.6, 57.4], [165.0, 57.4]], 'AnalysisID': 'CMS-SUS-16-034',\n 'DataSetID': None, 'AnalysisSqrts (TeV)': 13.0, 'lumi (fb-1)': 35.9,\n 'dataType': 'upperLimit', 'r': 0.033753469664099235, 'r_expected': None\n }], 'Total xsec considered (fb)': 5455.932556090008,\n 'Missed Topologies': [{'sqrts (TeV)': 13.0, 'weight (fb)': \n 1525.2339345595758, 'element': \"[[[jet]],[[jet],[jet]]] ('MET', 'MET')\"\n }, {'sqrts (TeV)': 13.0, 'weight (fb)': 164.5650363, 'element':\n \"[[],[[W]]] ('MET', 'MET')\"}, {'sqrts (TeV)': 13.0, 'weight (fb)': \n 131.21450642075922, 'element':\n \"[[[jet],[Z]],[[jet],[jet]]] ('MET', 
'MET')\"}, {'sqrts (TeV)': 13.0,\n 'weight (fb)': 131.09407599353733, 'element':\n \"[[[jet]],[[jet],[jet],[Z]]] ('MET', 'MET')\"}, {'sqrts (TeV)': 13.0,\n 'weight (fb)': 125.30880443708375, 'element':\n \"[[[jet]],[[jet],[Z]]] ('MET', 'MET')\"}, {'sqrts (TeV)': 13.0,\n 'weight (fb)': 109.09980502038648, 'element':\n \"[[[jet],[jet]],[[jet],[jet],[Z]]] ('MET', 'MET')\"}, {'sqrts (TeV)': \n 13.0, 'weight (fb)': 87.78855441, 'element':\n \"[[],[[Z]]] ('MET', 'MET')\"}, {'sqrts (TeV)': 13.0, 'weight (fb)': \n 23.328775686902066, 'element': \"[[],[[jet]]] ('MET', 'MET')\"}, {\n 'sqrts (TeV)': 13.0, 'weight (fb)': 18.943846, 'element':\n \"[[],[]] ('MET', 'MET')\"}, {'sqrts (TeV)': 13.0, 'weight (fb)': \n 11.23256793951906, 'element':\n \"[[[jet],[Z]],[[jet],[jet],[Z]]] ('MET', 'MET')\"}], 'Long Cascades': [{\n 'sqrts (TeV)': 13.0, 'weight (fb)': 142.32664393305637, 'mother PIDs':\n [[1000021, 2000001], [1000021, 2000003]]}, {'sqrts (TeV)': 13.0,\n 'weight (fb)': 113.78856056272761, 'mother PIDs': [[1000021, 1000021]]},\n {'sqrts (TeV)': 13.0, 'weight (fb)': 2.556908397604195, 'mother PIDs':\n [[2000001, 2000002], [2000002, 2000003]]}, {'sqrts (TeV)': 13.0,\n 'weight (fb)': 1.658904680547042, 'mother PIDs': [[1000021, 2000002]]},\n {'sqrts (TeV)': 13.0, 'weight (fb)': 1.5034517332026478, 'mother PIDs':\n [[1000002, 1000021]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': \n 0.73751489438902, 'mother PIDs': [[1000021, 1000022]]}, {'sqrts (TeV)':\n 13.0, 'weight (fb)': 0.514380675953777, 'mother PIDs': [[1000001, \n 2000001], [1000001, 2000003], [1000003, 2000001], [1000003, 2000003]]},\n {'sqrts (TeV)': 13.0, 'weight (fb)': 0.22710347967142056, 'mother PIDs':\n [[1000002, 2000001], [1000002, 2000003]]}], 'Asymmetric Branches': [{\n 'sqrts (TeV)': 13.0, 'weight (fb)': 1656.3887238722155, 'mother PIDs':\n [[1000021, 2000001], [1000021, 2000003]]}, {'sqrts (TeV)': 13.0,\n 'weight (fb)': 164.5650363, 'mother PIDs': [[1000022, 1000024]]}, {\n 'sqrts (TeV)': 13.0, 'weight (fb)': 126.94317745006455, 'mother PIDs':\n [[2000001, 2000001], [2000001, 2000003], [2000003, 2000003]]}, {\n 'sqrts (TeV)': 13.0, 'weight (fb)': 81.7049616, 'mother PIDs': [[\n 1000022, 1000023]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': \n 25.33546877159406, 'mother PIDs': [[1000022, 2000001], [1000022, \n 2000003]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 8.580393075610981,\n 'mother PIDs': [[1000021, 1000022]]}, {'sqrts (TeV)': 13.0,\n 'weight (fb)': 6.08359281, 'mother PIDs': [[1000022, 1000025]]}, {\n 'sqrts (TeV)': 13.0, 'weight (fb)': 2.055186185956878, 'mother PIDs': [\n [1000025, 2000001], [1000025, 2000003]]}, {'sqrts (TeV)': 13.0,\n 'weight (fb)': 0.5969685251910638, 'mother PIDs': [[1000023, 2000001],\n [1000023, 2000003]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': \n 0.42547403652557386, 'mother PIDs': [[1000021, 1000025]]}],\n 'Outside Grid': [{'sqrts (TeV)': 13.0, 'weight (fb)': \n 0.07215987170114271, 'element': \"[[[jet]],[[jet]]] ('MET', 'MET')\"}, {\n 'sqrts (TeV)': 13.0, 'weight (fb)': 0.021621502520314927, 'element':\n \"[[[l]],[[l]]] ('MET', 'MET')\"}]}\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
SECRET_KEY = os.environ['SECRET_KEY']
ALLOWED_HOSTS = ['demo.pythonic.nl']
DEBUG = False
<|reserved_special_token_1|>
from .base import *
import os
SECRET_KEY = os.environ['SECRET_KEY']
ALLOWED_HOSTS = ['demo.pythonic.nl']
DEBUG = False
|
flexible
|
{
"blob_id": "e5607d9893b775b216d1790897124a673b190c26",
"index": 2085,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nSECRET_KEY = os.environ['SECRET_KEY']\nALLOWED_HOSTS = ['demo.pythonic.nl']\nDEBUG = False\n",
"step-3": "from .base import *\nimport os\nSECRET_KEY = os.environ['SECRET_KEY']\nALLOWED_HOSTS = ['demo.pythonic.nl']\nDEBUG = False\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class GroupSignature:
def __init__(self, groupObj):
global util, group
util = SecretUtil(groupObj, debug)
self.group = groupObj
def pkGen(self, h1str):
gstr = (
'[6172776968119684165170291368128433652817636448173749093457023424948260385279837018774774149930982188956916913145008943931711059687988096415181819433817738, 8687587692191287108886119971783525001480020593934954052605681527814232399216375005546606067382536684351686344089456732201641997200939472924879001214689004]'
)
g2str = (
'[7648994551207171188393784904797547917038803147671542540175090956205316897431443264058433935237605598252399113847934759009659621851760599508222321653067284, 922489308494109901795721463782161260386164061515796674638135394871842997698175772871045949554746517321480649326465484116060959631197509151923296896589720]'
)
u0str = (
'[180015966842918451436547451263180245588308971597733548673037049536176684754209695288737508087729924028686259002375511049961436438196866049956546630518033, 1295050197915669955783867959538729894307963685491173858450359845766785488725907727220684060845012524740394664162328817669422178637925195059862486690053923]'
)
u1str = (
'[2555472719769037960206282327195096320915753855199743796256065902544200822503613205017219993060986152240852358189992579821797745072366030183800897743028220, 7573705235093543416041007636313631591000596820214067724084077929638801811700093589294454562385664531190678890366928407286293582994146887505184778221562373]'
)
u2str = (
'[6876276970903121931083294698771200898345396507892092532649392211995185517437159402176975528760594250374462299539306423347676182899798006533425047523984724, 5323739238507219125881988073888745575030677585404965990610324901624530474522642705344792075909082041735695801098770187248023797265998906693745587936574078]'
)
u3str = (
'[6628726193389375981104409894060310698729022957801238449570622103067828518416602275957863668289683360250722835022304456841105526036470008237775051984811323, 862537748555943361001122447731987661405436458862545177179548603003392540530328380518694788420155531238391922289886044667763424887444361610972254938158280]'
)
u4str = (
'[8157254219580822599577995921928211211847392705248772673869189421041858895589817404931780741226510985762564598862965174380020566416411083236239871342674775, 4736677719200783513058679582227494204159737596114643136852532046080608159561620208171676599501713934575216178076006396924589443776642926902969084668055006]'
)
hstr = (
'[6248393417805371388321299785844751688345516419281230263497475615452026459314582553252281068616984105757749673095320346188725995701858182333525688832492249, 351368339412205819108519989143352052898751906937356995136442397753142226531384069336237369861919799955237545207977716196031001184146017796598836939617335]'
)
nstr = (
'[75201312764006187596691102237923705656296213254701583615255122742135170369075831428394751330697143847448434841509551532135632624530360013837581615049543, 3886258599652934715331576083899336629981754505948456216299528998628273512432828729344158706718479567056972375128622026273382126529171409058157562418608963]'
)
g = self.group.fromstr(gstr, 10, G1)
g2 = self.group.fromstr(g2str, 10, G2)
u0 = self.group.fromstr(u0str, 10, G2)
u1 = self.group.fromstr(u1str, 10, G2)
u2 = self.group.fromstr(u2str, 10, G2)
u3 = self.group.fromstr(u3str, 10, G2)
u4 = self.group.fromstr(u4str, 10, G2)
h = self.group.fromstr(hstr, 10, G1)
n = self.group.fromstr(nstr, 10, GT)
h1 = self.group.fromstr(h1str, 10, G1)
pk = {'g': g, 'g2': g2, 'u0': u0, 'u1': u1, 'u2': u2, 'u3': u3,
'u4': u4, 'h': h, 'n': n, 'h1': h1}
return pk
def uskGen(self, usklist, pk, GID, UID, L, k):
t1 = time()
b0 = self.group.gen1_0(1)
b3 = self.group.gen1_0(1)
b4 = self.group.gen1_0(1)
b5 = self.group.gen1_0(1)
r2 = self.group.random(ZR)
for i in range(k):
b0 = b0 * usklist[i]['b0'] ** L[i]
b3 = b3 * usklist[i]['b3'] ** L[i]
b4 = b4 * usklist[i]['b4'] ** L[i]
b5 = b5 * usklist[i]['b5'] ** L[i]
b0 = b0 * (pk['u0'] * pk['u1'] ** GID * pk['u2'] ** UID) ** r2
b3 = b3 * pk['u3'] ** r2
b4 = b4 * pk['u4'] ** r2
b5 = b5 * pk['g'] ** r2
usk = {'b0': b0, 'b3': b3, 'b4': b4, 'b5': b5}
t2 = time()
with open('extracttime.txt', 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
return usk
def LGen(self, n, k):
L = []
I = self.group.random(ZR)
J = self.group.random(ZR)
for i in range(n):
L.append(self.group.random(ZR))
L[i].set(1)
I.set(i + 1)
for j in range(1, k + 1):
print(j)
J.set(j)
if i + 1 != j:
L[i] = L[i] * (J / (J - I))
return L
def verifyUsk(self, usk, vk, pk, GID, UID):
g = pk['g']
g2 = pk['g2']
u0 = pk['u0']
u1 = pk['u1']
u2 = pk['u2']
u3 = pk['u3']
u4 = pk['u4']
b0 = usk['b0']
b5 = usk['b5']
b3 = usk['b3']
b4 = usk['b4']
return pair(g, b0) == pair(vk, g2) * pair(b5, u0) * pair(b5, u1 ** GID
) * pair(b5, u2 ** UID) and pair(g, b3) == pair(b5, u3) and pair(g,
b4) == pair(b5, u4)
def sign(self, title, usk, pk, GID, UID, groupID):
t1 = time()
m = self.group.hash(title)
b0 = usk['b0']
b3 = usk['b3']
b4 = usk['b4']
b5 = usk['b5']
r4 = self.group.random(ZR)
r3 = self.group.random(ZR)
k = self.group.random(ZR)
c0 = b0 * b3 ** m * b4 ** r4 * (pk['u0'] * pk['u1'] ** GID * pk[
'u2'] ** UID * pk['u3'] ** m * pk['u4'] ** r4) ** r3
c5 = b5 * pk['g'] ** r3
c6 = pk['u2'] ** UID * pk['u4'] ** r4
e1 = pk['g'] ** k
e2 = (pk['u0'] * pk['u1'] ** GID) ** k
e3 = pk['n'] ** UID * pair(pk['h1'], pk['g2']) ** k
f = pk['u0'] * pk['u1'] ** GID
gp = pair(pk['h1'], pk['g2'])
k1 = self.group.random(ZR)
k2 = self.group.random(ZR)
k3 = self.group.random(ZR)
r1 = pk['u2'] ** k1 * pk['u4'] ** k2
r2 = pk['g'] ** k3
r3 = f ** k3
t4 = pk['n'] ** k1 * gp ** k3
hashstr = str(r1) + str(r2) + str(r3) + str(t4)
c = self.group.hash(hashstr)
s1 = k1 + c * UID
s2 = k2 + c * r4
s3 = k3 + c * k
signature = {'c0': c0, 'c5': c5, 'c6': c6, 'e1': e1, 'e2': e2, 'e3':
e3, 'c': c, 's1': s1, 's2': s2, 's3': s3}
t2 = time()
with open('gssigntime.txt', 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
print('gs time', t2 - t1)
return signature
def open(self, okliststr, L, k):
t1 = time()
oklist = []
for ok in okliststr:
oklist.append({'ok1': self.group.fromstr(ok['ok1'], 10, GT),
'ok2': self.group.fromstr(ok['ok2'], 10, GT)})
ok1 = self.group.gen1_0(1)
ok2 = self.group.gen1_0(1)
for i in range(k):
ok1 = ok1 * oklist[i]['ok1'] ** L[i]
ok2 = ok2 * oklist[i]['ok2'] ** L[i]
t2 = time()
with open('opentime.txt', 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
print('open time', t2 - t1)
return ok1 / ok2
<|reserved_special_token_0|>
def creat_commit_tx(account, usk, pk, GID, UID, steemd_instance,
wallet_instance, title='paper_title', body='paper_body'):
commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID,
title, body, groupID='computer')
commit_tx = tx_build(commitop, steemd_instance, wallet_instance, account)
return ssig, permlink, commit_tx
<|reserved_special_token_0|>
def mul_tx_broad(txlist):
threads = []
for tx in txlist:
t = MyThread(tx_broad, args=(tx,))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GroupSignature:
def __init__(self, groupObj):
global util, group
util = SecretUtil(groupObj, debug)
self.group = groupObj
def pkGen(self, h1str):
gstr = (
'[6172776968119684165170291368128433652817636448173749093457023424948260385279837018774774149930982188956916913145008943931711059687988096415181819433817738, 8687587692191287108886119971783525001480020593934954052605681527814232399216375005546606067382536684351686344089456732201641997200939472924879001214689004]'
)
g2str = (
'[7648994551207171188393784904797547917038803147671542540175090956205316897431443264058433935237605598252399113847934759009659621851760599508222321653067284, 922489308494109901795721463782161260386164061515796674638135394871842997698175772871045949554746517321480649326465484116060959631197509151923296896589720]'
)
u0str = (
'[180015966842918451436547451263180245588308971597733548673037049536176684754209695288737508087729924028686259002375511049961436438196866049956546630518033, 1295050197915669955783867959538729894307963685491173858450359845766785488725907727220684060845012524740394664162328817669422178637925195059862486690053923]'
)
u1str = (
'[2555472719769037960206282327195096320915753855199743796256065902544200822503613205017219993060986152240852358189992579821797745072366030183800897743028220, 7573705235093543416041007636313631591000596820214067724084077929638801811700093589294454562385664531190678890366928407286293582994146887505184778221562373]'
)
u2str = (
'[6876276970903121931083294698771200898345396507892092532649392211995185517437159402176975528760594250374462299539306423347676182899798006533425047523984724, 5323739238507219125881988073888745575030677585404965990610324901624530474522642705344792075909082041735695801098770187248023797265998906693745587936574078]'
)
u3str = (
'[6628726193389375981104409894060310698729022957801238449570622103067828518416602275957863668289683360250722835022304456841105526036470008237775051984811323, 862537748555943361001122447731987661405436458862545177179548603003392540530328380518694788420155531238391922289886044667763424887444361610972254938158280]'
)
u4str = (
'[8157254219580822599577995921928211211847392705248772673869189421041858895589817404931780741226510985762564598862965174380020566416411083236239871342674775, 4736677719200783513058679582227494204159737596114643136852532046080608159561620208171676599501713934575216178076006396924589443776642926902969084668055006]'
)
hstr = (
'[6248393417805371388321299785844751688345516419281230263497475615452026459314582553252281068616984105757749673095320346188725995701858182333525688832492249, 351368339412205819108519989143352052898751906937356995136442397753142226531384069336237369861919799955237545207977716196031001184146017796598836939617335]'
)
nstr = (
'[75201312764006187596691102237923705656296213254701583615255122742135170369075831428394751330697143847448434841509551532135632624530360013837581615049543, 3886258599652934715331576083899336629981754505948456216299528998628273512432828729344158706718479567056972375128622026273382126529171409058157562418608963]'
)
g = self.group.fromstr(gstr, 10, G1)
g2 = self.group.fromstr(g2str, 10, G2)
u0 = self.group.fromstr(u0str, 10, G2)
u1 = self.group.fromstr(u1str, 10, G2)
u2 = self.group.fromstr(u2str, 10, G2)
u3 = self.group.fromstr(u3str, 10, G2)
u4 = self.group.fromstr(u4str, 10, G2)
h = self.group.fromstr(hstr, 10, G1)
n = self.group.fromstr(nstr, 10, GT)
h1 = self.group.fromstr(h1str, 10, G1)
pk = {'g': g, 'g2': g2, 'u0': u0, 'u1': u1, 'u2': u2, 'u3': u3,
'u4': u4, 'h': h, 'n': n, 'h1': h1}
return pk
def uskGen(self, usklist, pk, GID, UID, L, k):
t1 = time()
b0 = self.group.gen1_0(1)
b3 = self.group.gen1_0(1)
b4 = self.group.gen1_0(1)
b5 = self.group.gen1_0(1)
r2 = self.group.random(ZR)
for i in range(k):
b0 = b0 * usklist[i]['b0'] ** L[i]
b3 = b3 * usklist[i]['b3'] ** L[i]
b4 = b4 * usklist[i]['b4'] ** L[i]
b5 = b5 * usklist[i]['b5'] ** L[i]
b0 = b0 * (pk['u0'] * pk['u1'] ** GID * pk['u2'] ** UID) ** r2
b3 = b3 * pk['u3'] ** r2
b4 = b4 * pk['u4'] ** r2
b5 = b5 * pk['g'] ** r2
usk = {'b0': b0, 'b3': b3, 'b4': b4, 'b5': b5}
t2 = time()
with open('extracttime.txt', 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
return usk
def LGen(self, n, k):
L = []
I = self.group.random(ZR)
J = self.group.random(ZR)
for i in range(n):
L.append(self.group.random(ZR))
L[i].set(1)
I.set(i + 1)
for j in range(1, k + 1):
print(j)
J.set(j)
if i + 1 != j:
L[i] = L[i] * (J / (J - I))
return L
def verifyUsk(self, usk, vk, pk, GID, UID):
g = pk['g']
g2 = pk['g2']
u0 = pk['u0']
u1 = pk['u1']
u2 = pk['u2']
u3 = pk['u3']
u4 = pk['u4']
b0 = usk['b0']
b5 = usk['b5']
b3 = usk['b3']
b4 = usk['b4']
return pair(g, b0) == pair(vk, g2) * pair(b5, u0) * pair(b5, u1 ** GID
) * pair(b5, u2 ** UID) and pair(g, b3) == pair(b5, u3) and pair(g,
b4) == pair(b5, u4)
def sign(self, title, usk, pk, GID, UID, groupID):
t1 = time()
m = self.group.hash(title)
b0 = usk['b0']
b3 = usk['b3']
b4 = usk['b4']
b5 = usk['b5']
r4 = self.group.random(ZR)
r3 = self.group.random(ZR)
k = self.group.random(ZR)
c0 = b0 * b3 ** m * b4 ** r4 * (pk['u0'] * pk['u1'] ** GID * pk[
'u2'] ** UID * pk['u3'] ** m * pk['u4'] ** r4) ** r3
c5 = b5 * pk['g'] ** r3
c6 = pk['u2'] ** UID * pk['u4'] ** r4
e1 = pk['g'] ** k
e2 = (pk['u0'] * pk['u1'] ** GID) ** k
e3 = pk['n'] ** UID * pair(pk['h1'], pk['g2']) ** k
f = pk['u0'] * pk['u1'] ** GID
gp = pair(pk['h1'], pk['g2'])
k1 = self.group.random(ZR)
k2 = self.group.random(ZR)
k3 = self.group.random(ZR)
r1 = pk['u2'] ** k1 * pk['u4'] ** k2
r2 = pk['g'] ** k3
r3 = f ** k3
t4 = pk['n'] ** k1 * gp ** k3
hashstr = str(r1) + str(r2) + str(r3) + str(t4)
c = self.group.hash(hashstr)
s1 = k1 + c * UID
s2 = k2 + c * r4
s3 = k3 + c * k
signature = {'c0': c0, 'c5': c5, 'c6': c6, 'e1': e1, 'e2': e2, 'e3':
e3, 'c': c, 's1': s1, 's2': s2, 's3': s3}
t2 = time()
with open('gssigntime.txt', 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
print('gs time', t2 - t1)
return signature
def open(self, okliststr, L, k):
t1 = time()
oklist = []
for ok in okliststr:
oklist.append({'ok1': self.group.fromstr(ok['ok1'], 10, GT),
'ok2': self.group.fromstr(ok['ok2'], 10, GT)})
ok1 = self.group.gen1_0(1)
ok2 = self.group.gen1_0(1)
for i in range(k):
ok1 = ok1 * oklist[i]['ok1'] ** L[i]
ok2 = ok2 * oklist[i]['ok2'] ** L[i]
t2 = time()
with open('opentime.txt', 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
print('open time', t2 - t1)
return ok1 / ok2
<|reserved_special_token_0|>
def get_lam(sig):
okliststr = []
i = 0
for client in clientlist:
okstr = client.get_ok(str(sig['e1']), str(sig['e2']))
print(okstr)
okliststr.append(okstr)
i = i + 1
if i < k:
print('the number of ok is not enough\n')
return
lam = group_signature.open(okliststr, L, k)
return lam
def tx_build_broad(op, steemd_instance, wallet_instance, account):
tx = TransactionBuilder(steemd_instance=steemd_instance,
wallet_instance=wallet_instance, no_broadcast=False)
tx.appendOps(op)
tx.appendSigner(account, 'posting')
tx.sign()
re = tx.broadcast()
return re
<|reserved_special_token_0|>
def annoy_commit_tx(account, usk, pk, GID, UID, steemd_instance,
wallet_instance, title='paper_title', body='paper_body'):
    commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID,
        title, body, groupID='computer')
re = tx_build_broad(commitop, steemd_instance, wallet_instance, account)
print('commit-re', re)
return ssig, permlink
<|reserved_special_token_0|>
def one_mul_annoy_tx(account, usk, pk, UID, steemd, wallet):
ssiglistone = []
permlinklistone = []
threads = []
for i in range(nodeTX):
t = MyThread(annoy_commit_tx, args=(account, usk, pk, GID, UID,
steemd, wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
ssig, permlink = t.get_result()
ssiglistone.append(ssig)
permlinklistone.append(permlink)
return ssiglistone, permlinklistone
def one_mul_open_tx(account, ssiglistone, userID, permlinklistone, steemd,
wallet):
threads = []
for i in range(nodeTX):
t = MyThread(open_tx, args=(account, ssiglistone[i], userID,
permlinklistone[i], steemd, wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
<|reserved_special_token_0|>
def creat_commit_tx(account, usk, pk, GID, UID, steemd_instance,
wallet_instance, title='paper_title', body='paper_body'):
commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID,
title, body, groupID='computer')
commit_tx = tx_build(commitop, steemd_instance, wallet_instance, account)
return ssig, permlink, commit_tx
def creat_num_commit_tx(num, account, usk, pk, GID, UID, steemd_instance,
wallet_instance, ttitle='paper_title', tbody='paper_body'):
ssiglist = []
permlinklist = []
txlist = []
threads = []
for i in range(num):
t = MyThread(creat_commit_tx, args=(account, usk, pk, GID, UID,
steemd_instance, wallet_instance, ttitle, tbody))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
ssig, permlink, commit_tx = t.get_result()
ssiglist.append(ssig)
permlinklist.append(permlink)
txlist.append(commit_tx)
return ssiglist, permlinklist, txlist
def creat_open_tx(account, ssig, userID, permlink, steemd_instance,
wallet_instance):
openop = open_op(account, ssig, userID, permlink)
open_tx = tx_build(openop, steemd_instance, wallet_instance, account)
return open_tx
def creat_num_open_tx(num, account, ssiglist, userID, permlinklist,
steemd_instance, wallet_instance):
opentxlist = []
threads = []
for i in range(num):
t = MyThread(creat_open_tx, args=(account, ssiglist[i], userID,
permlinklist[i], steemd_instance, wallet_instance))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
opentx = t.get_result()
opentxlist.append(opentx)
return opentxlist
def tx_broad(tx):
tx.broadcast()
def mul_tx_broad(txlist):
threads = []
for tx in txlist:
t = MyThread(tx_broad, args=(tx,))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GroupSignature:
def __init__(self, groupObj):
global util, group
util = SecretUtil(groupObj, debug)
self.group = groupObj
def pkGen(self, h1str):
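        # Reconstruct the fixed public parameters (generators and bases in G1, G2 and GT)
        # from their hard-coded string encodings; only h1 is supplied by the caller.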
gstr = (
'[6172776968119684165170291368128433652817636448173749093457023424948260385279837018774774149930982188956916913145008943931711059687988096415181819433817738, 8687587692191287108886119971783525001480020593934954052605681527814232399216375005546606067382536684351686344089456732201641997200939472924879001214689004]'
)
g2str = (
'[7648994551207171188393784904797547917038803147671542540175090956205316897431443264058433935237605598252399113847934759009659621851760599508222321653067284, 922489308494109901795721463782161260386164061515796674638135394871842997698175772871045949554746517321480649326465484116060959631197509151923296896589720]'
)
u0str = (
'[180015966842918451436547451263180245588308971597733548673037049536176684754209695288737508087729924028686259002375511049961436438196866049956546630518033, 1295050197915669955783867959538729894307963685491173858450359845766785488725907727220684060845012524740394664162328817669422178637925195059862486690053923]'
)
u1str = (
'[2555472719769037960206282327195096320915753855199743796256065902544200822503613205017219993060986152240852358189992579821797745072366030183800897743028220, 7573705235093543416041007636313631591000596820214067724084077929638801811700093589294454562385664531190678890366928407286293582994146887505184778221562373]'
)
u2str = (
'[6876276970903121931083294698771200898345396507892092532649392211995185517437159402176975528760594250374462299539306423347676182899798006533425047523984724, 5323739238507219125881988073888745575030677585404965990610324901624530474522642705344792075909082041735695801098770187248023797265998906693745587936574078]'
)
u3str = (
'[6628726193389375981104409894060310698729022957801238449570622103067828518416602275957863668289683360250722835022304456841105526036470008237775051984811323, 862537748555943361001122447731987661405436458862545177179548603003392540530328380518694788420155531238391922289886044667763424887444361610972254938158280]'
)
u4str = (
'[8157254219580822599577995921928211211847392705248772673869189421041858895589817404931780741226510985762564598862965174380020566416411083236239871342674775, 4736677719200783513058679582227494204159737596114643136852532046080608159561620208171676599501713934575216178076006396924589443776642926902969084668055006]'
)
hstr = (
'[6248393417805371388321299785844751688345516419281230263497475615452026459314582553252281068616984105757749673095320346188725995701858182333525688832492249, 351368339412205819108519989143352052898751906937356995136442397753142226531384069336237369861919799955237545207977716196031001184146017796598836939617335]'
)
nstr = (
'[75201312764006187596691102237923705656296213254701583615255122742135170369075831428394751330697143847448434841509551532135632624530360013837581615049543, 3886258599652934715331576083899336629981754505948456216299528998628273512432828729344158706718479567056972375128622026273382126529171409058157562418608963]'
)
g = self.group.fromstr(gstr, 10, G1)
g2 = self.group.fromstr(g2str, 10, G2)
u0 = self.group.fromstr(u0str, 10, G2)
u1 = self.group.fromstr(u1str, 10, G2)
u2 = self.group.fromstr(u2str, 10, G2)
u3 = self.group.fromstr(u3str, 10, G2)
u4 = self.group.fromstr(u4str, 10, G2)
h = self.group.fromstr(hstr, 10, G1)
n = self.group.fromstr(nstr, 10, GT)
h1 = self.group.fromstr(h1str, 10, G1)
pk = {'g': g, 'g2': g2, 'u0': u0, 'u1': u1, 'u2': u2, 'u3': u3,
'u4': u4, 'h': h, 'n': n, 'h1': h1}
return pk
def uskGen(self, usklist, pk, GID, UID, L, k):
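        # Combine k user-key shares with the Lagrange coefficients L and
        # re-randomize the result with a fresh exponent r2.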
t1 = time()
b0 = self.group.gen1_0(1)
b3 = self.group.gen1_0(1)
b4 = self.group.gen1_0(1)
b5 = self.group.gen1_0(1)
r2 = self.group.random(ZR)
for i in range(k):
b0 = b0 * usklist[i]['b0'] ** L[i]
b3 = b3 * usklist[i]['b3'] ** L[i]
b4 = b4 * usklist[i]['b4'] ** L[i]
b5 = b5 * usklist[i]['b5'] ** L[i]
b0 = b0 * (pk['u0'] * pk['u1'] ** GID * pk['u2'] ** UID) ** r2
b3 = b3 * pk['u3'] ** r2
b4 = b4 * pk['u4'] ** r2
b5 = b5 * pk['g'] ** r2
usk = {'b0': b0, 'b3': b3, 'b4': b4, 'b5': b5}
t2 = time()
with open('extracttime.txt', 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
return usk
def LGen(self, n, k):
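        # Lagrange interpolation coefficients (evaluated at 0) used to recombine k-of-n threshold shares.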
L = []
I = self.group.random(ZR)
J = self.group.random(ZR)
for i in range(n):
L.append(self.group.random(ZR))
L[i].set(1)
I.set(i + 1)
for j in range(1, k + 1):
print(j)
J.set(j)
if i + 1 != j:
L[i] = L[i] * (J / (J - I))
return L
def verifyUsk(self, usk, vk, pk, GID, UID):
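        # Check the pairing equations binding the key components (b0, b3, b4, b5)
        # to the issuer key vk and the group/user IDs.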
g = pk['g']
g2 = pk['g2']
u0 = pk['u0']
u1 = pk['u1']
u2 = pk['u2']
u3 = pk['u3']
u4 = pk['u4']
b0 = usk['b0']
b5 = usk['b5']
b3 = usk['b3']
b4 = usk['b4']
return pair(g, b0) == pair(vk, g2) * pair(b5, u0) * pair(b5, u1 ** GID
) * pair(b5, u2 ** UID) and pair(g, b3) == pair(b5, u3) and pair(g,
b4) == pair(b5, u4)
def sign(self, title, usk, pk, GID, UID, groupID):
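        # Build the signature: a randomized key/message commitment (c0, c5, c6),
        # an encryption of the signer-identifying value n**UID (e1, e2, e3),
        # and a hash-based (Fiat-Shamir style) proof (c, s1, s2, s3).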
t1 = time()
m = self.group.hash(title)
b0 = usk['b0']
b3 = usk['b3']
b4 = usk['b4']
b5 = usk['b5']
r4 = self.group.random(ZR)
r3 = self.group.random(ZR)
k = self.group.random(ZR)
c0 = b0 * b3 ** m * b4 ** r4 * (pk['u0'] * pk['u1'] ** GID * pk[
'u2'] ** UID * pk['u3'] ** m * pk['u4'] ** r4) ** r3
c5 = b5 * pk['g'] ** r3
c6 = pk['u2'] ** UID * pk['u4'] ** r4
e1 = pk['g'] ** k
e2 = (pk['u0'] * pk['u1'] ** GID) ** k
e3 = pk['n'] ** UID * pair(pk['h1'], pk['g2']) ** k
f = pk['u0'] * pk['u1'] ** GID
gp = pair(pk['h1'], pk['g2'])
k1 = self.group.random(ZR)
k2 = self.group.random(ZR)
k3 = self.group.random(ZR)
r1 = pk['u2'] ** k1 * pk['u4'] ** k2
r2 = pk['g'] ** k3
r3 = f ** k3
t4 = pk['n'] ** k1 * gp ** k3
hashstr = str(r1) + str(r2) + str(r3) + str(t4)
c = self.group.hash(hashstr)
s1 = k1 + c * UID
s2 = k2 + c * r4
s3 = k3 + c * k
signature = {'c0': c0, 'c5': c5, 'c6': c6, 'e1': e1, 'e2': e2, 'e3':
e3, 'c': c, 's1': s1, 's2': s2, 's3': s3}
t2 = time()
with open('gssigntime.txt', 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
print('gs time', t2 - t1)
return signature
def open(self, okliststr, L, k):
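        # Recombine k opening shares with the Lagrange coefficients; the quotient
        # ok1 / ok2 is used to de-anonymize the signature.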
t1 = time()
oklist = []
for ok in okliststr:
oklist.append({'ok1': self.group.fromstr(ok['ok1'], 10, GT),
'ok2': self.group.fromstr(ok['ok2'], 10, GT)})
ok1 = self.group.gen1_0(1)
ok2 = self.group.gen1_0(1)
for i in range(k):
ok1 = ok1 * oklist[i]['ok1'] ** L[i]
ok2 = ok2 * oklist[i]['ok2'] ** L[i]
t2 = time()
with open('opentime.txt', 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
print('open time', t2 - t1)
return ok1 / ok2
<|reserved_special_token_0|>
def get_lam(sig):
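    # Collect opening shares for (e1, e2) from the clients in clientlist and recombine the first k of them.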
okliststr = []
i = 0
for client in clientlist:
okstr = client.get_ok(str(sig['e1']), str(sig['e2']))
print(okstr)
okliststr.append(okstr)
i = i + 1
if i < k:
print('the number of ok is not enough\n')
return
lam = group_signature.open(okliststr, L, k)
return lam
def tx_build_broad(op, steemd_instance, wallet_instance, account):
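    # Build a transaction around op, sign it with the account's posting key and broadcast it immediately.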
tx = TransactionBuilder(steemd_instance=steemd_instance,
wallet_instance=wallet_instance, no_broadcast=False)
tx.appendOps(op)
tx.appendSigner(account, 'posting')
tx.sign()
re = tx.broadcast()
return re
<|reserved_special_token_0|>
def annoy_commit(account, usk, pk, GID, UID, title='paper_title', body=
'paper_body', groupID='computer'):
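    # Wrap the group signature over the title into a CommitPaper custom operation;
    # the permlink is a random 7-digit string.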
annoy_author = 'nya'
sig = group_signature.sign(title, usk, pk, GID, UID, groupID)
permlink = ''.join(random.choices(string.digits, k=7))
print('permlink is ' + permlink)
op = operations.CommitPaper(**{'account': account, 'author':
annoy_author, 'permlink': permlink, 'title': title, 'body': body,
'json_metadata': '', 'c0': str(sig['c0']), 'c5': str(sig['c5']),
'c6': str(sig['c6']), 'e1': str(sig['e1']), 'e2': str(sig['e2']),
'e3': str(sig['e3']), 'c': str(sig['c']), 's1': str(sig['s1']),
's2': str(sig['s2']), 's3': str(sig['s3'])})
print('commitop', op)
return op, sig, permlink
<|reserved_special_token_0|>
def annoy_commit_tx(account, usk, pk, GID, UID, steemd_instance,
wallet_instance, title='paper_title', body='paper_body'):
    commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID,
        title, body, groupID='computer')
re = tx_build_broad(commitop, steemd_instance, wallet_instance, account)
print('commit-re', re)
return ssig, permlink
<|reserved_special_token_0|>
def one_mul_annoy_tx(account, usk, pk, UID, steemd, wallet):
ssiglistone = []
permlinklistone = []
threads = []
for i in range(nodeTX):
t = MyThread(annoy_commit_tx, args=(account, usk, pk, GID, UID,
steemd, wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
ssig, permlink = t.get_result()
ssiglistone.append(ssig)
permlinklistone.append(permlink)
return ssiglistone, permlinklistone
def one_mul_open_tx(account, ssiglistone, userID, permlinklistone, steemd,
wallet):
threads = []
for i in range(nodeTX):
t = MyThread(open_tx, args=(account, ssiglistone[i], userID,
permlinklistone[i], steemd, wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
def mul_annoy_tx(usk, pk, UID):
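    # Fan out anonymous commit transactions across all n clients, one thread per client
    # (each client issues nodeTX commits).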
ssiglist = []
permlinklist = []
threads = []
for i in range(n):
t = MyThread(one_mul_annoy_tx, args=(accountlist[i], usk, pk, UID,
clientlist[i].steemd, clientlist[i].wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
ssig, permlink = t.get_result()
ssiglist.append(ssig)
permlinklist.append(permlink)
return ssiglist, permlinklist
def mul_open_tx(ssiglist, permlinklist, userID):
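    # Broadcast the corresponding open operations in parallel, one thread per client.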
threads = []
for i in range(n):
t = MyThread(one_mul_open_tx, args=(accountlist[i], ssiglist[i],
userID, permlinklist[i], clientlist[i].steemd, clientlist[i].
wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
def creat_commit_tx(account, usk, pk, GID, UID, steemd_instance,
wallet_instance, title='paper_title', body='paper_body'):
commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID,
title, body, groupID='computer')
commit_tx = tx_build(commitop, steemd_instance, wallet_instance, account)
return ssig, permlink, commit_tx
def creat_num_commit_tx(num, account, usk, pk, GID, UID, steemd_instance,
wallet_instance, ttitle='paper_title', tbody='paper_body'):
ssiglist = []
permlinklist = []
txlist = []
threads = []
for i in range(num):
t = MyThread(creat_commit_tx, args=(account, usk, pk, GID, UID,
steemd_instance, wallet_instance, ttitle, tbody))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
ssig, permlink, commit_tx = t.get_result()
ssiglist.append(ssig)
permlinklist.append(permlink)
txlist.append(commit_tx)
return ssiglist, permlinklist, txlist
def creat_open_tx(account, ssig, userID, permlink, steemd_instance,
wallet_instance):
openop = open_op(account, ssig, userID, permlink)
open_tx = tx_build(openop, steemd_instance, wallet_instance, account)
return open_tx
def creat_num_open_tx(num, account, ssiglist, userID, permlinklist,
steemd_instance, wallet_instance):
opentxlist = []
threads = []
for i in range(num):
t = MyThread(creat_open_tx, args=(account, ssiglist[i], userID,
permlinklist[i], steemd_instance, wallet_instance))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
opentx = t.get_result()
opentxlist.append(opentx)
return opentxlist
def tx_broad(tx):
tx.broadcast()
def mul_tx_broad(txlist):
threads = []
for tx in txlist:
t = MyThread(tx_broad, args=(tx,))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GroupSignature:
def __init__(self, groupObj):
global util, group
util = SecretUtil(groupObj, debug)
self.group = groupObj
def pkGen(self, h1str):
gstr = (
'[6172776968119684165170291368128433652817636448173749093457023424948260385279837018774774149930982188956916913145008943931711059687988096415181819433817738, 8687587692191287108886119971783525001480020593934954052605681527814232399216375005546606067382536684351686344089456732201641997200939472924879001214689004]'
)
g2str = (
'[7648994551207171188393784904797547917038803147671542540175090956205316897431443264058433935237605598252399113847934759009659621851760599508222321653067284, 922489308494109901795721463782161260386164061515796674638135394871842997698175772871045949554746517321480649326465484116060959631197509151923296896589720]'
)
u0str = (
'[180015966842918451436547451263180245588308971597733548673037049536176684754209695288737508087729924028686259002375511049961436438196866049956546630518033, 1295050197915669955783867959538729894307963685491173858450359845766785488725907727220684060845012524740394664162328817669422178637925195059862486690053923]'
)
u1str = (
'[2555472719769037960206282327195096320915753855199743796256065902544200822503613205017219993060986152240852358189992579821797745072366030183800897743028220, 7573705235093543416041007636313631591000596820214067724084077929638801811700093589294454562385664531190678890366928407286293582994146887505184778221562373]'
)
u2str = (
'[6876276970903121931083294698771200898345396507892092532649392211995185517437159402176975528760594250374462299539306423347676182899798006533425047523984724, 5323739238507219125881988073888745575030677585404965990610324901624530474522642705344792075909082041735695801098770187248023797265998906693745587936574078]'
)
u3str = (
'[6628726193389375981104409894060310698729022957801238449570622103067828518416602275957863668289683360250722835022304456841105526036470008237775051984811323, 862537748555943361001122447731987661405436458862545177179548603003392540530328380518694788420155531238391922289886044667763424887444361610972254938158280]'
)
u4str = (
'[8157254219580822599577995921928211211847392705248772673869189421041858895589817404931780741226510985762564598862965174380020566416411083236239871342674775, 4736677719200783513058679582227494204159737596114643136852532046080608159561620208171676599501713934575216178076006396924589443776642926902969084668055006]'
)
hstr = (
'[6248393417805371388321299785844751688345516419281230263497475615452026459314582553252281068616984105757749673095320346188725995701858182333525688832492249, 351368339412205819108519989143352052898751906937356995136442397753142226531384069336237369861919799955237545207977716196031001184146017796598836939617335]'
)
nstr = (
'[75201312764006187596691102237923705656296213254701583615255122742135170369075831428394751330697143847448434841509551532135632624530360013837581615049543, 3886258599652934715331576083899336629981754505948456216299528998628273512432828729344158706718479567056972375128622026273382126529171409058157562418608963]'
)
g = self.group.fromstr(gstr, 10, G1)
g2 = self.group.fromstr(g2str, 10, G2)
u0 = self.group.fromstr(u0str, 10, G2)
u1 = self.group.fromstr(u1str, 10, G2)
u2 = self.group.fromstr(u2str, 10, G2)
u3 = self.group.fromstr(u3str, 10, G2)
u4 = self.group.fromstr(u4str, 10, G2)
h = self.group.fromstr(hstr, 10, G1)
n = self.group.fromstr(nstr, 10, GT)
h1 = self.group.fromstr(h1str, 10, G1)
pk = {'g': g, 'g2': g2, 'u0': u0, 'u1': u1, 'u2': u2, 'u3': u3,
'u4': u4, 'h': h, 'n': n, 'h1': h1}
return pk
def uskGen(self, usklist, pk, GID, UID, L, k):
t1 = time()
b0 = self.group.gen1_0(1)
b3 = self.group.gen1_0(1)
b4 = self.group.gen1_0(1)
b5 = self.group.gen1_0(1)
r2 = self.group.random(ZR)
for i in range(k):
b0 = b0 * usklist[i]['b0'] ** L[i]
b3 = b3 * usklist[i]['b3'] ** L[i]
b4 = b4 * usklist[i]['b4'] ** L[i]
b5 = b5 * usklist[i]['b5'] ** L[i]
b0 = b0 * (pk['u0'] * pk['u1'] ** GID * pk['u2'] ** UID) ** r2
b3 = b3 * pk['u3'] ** r2
b4 = b4 * pk['u4'] ** r2
b5 = b5 * pk['g'] ** r2
usk = {'b0': b0, 'b3': b3, 'b4': b4, 'b5': b5}
t2 = time()
with open('extracttime.txt', 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
return usk
def LGen(self, n, k):
L = []
I = self.group.random(ZR)
J = self.group.random(ZR)
for i in range(n):
L.append(self.group.random(ZR))
L[i].set(1)
I.set(i + 1)
for j in range(1, k + 1):
print(j)
J.set(j)
if i + 1 != j:
L[i] = L[i] * (J / (J - I))
return L
def verifyUsk(self, usk, vk, pk, GID, UID):
g = pk['g']
g2 = pk['g2']
u0 = pk['u0']
u1 = pk['u1']
u2 = pk['u2']
u3 = pk['u3']
u4 = pk['u4']
b0 = usk['b0']
b5 = usk['b5']
b3 = usk['b3']
b4 = usk['b4']
return pair(g, b0) == pair(vk, g2) * pair(b5, u0) * pair(b5, u1 ** GID
) * pair(b5, u2 ** UID) and pair(g, b3) == pair(b5, u3) and pair(g,
b4) == pair(b5, u4)
def sign(self, title, usk, pk, GID, UID, groupID):
t1 = time()
m = self.group.hash(title)
b0 = usk['b0']
b3 = usk['b3']
b4 = usk['b4']
b5 = usk['b5']
r4 = self.group.random(ZR)
r3 = self.group.random(ZR)
k = self.group.random(ZR)
c0 = b0 * b3 ** m * b4 ** r4 * (pk['u0'] * pk['u1'] ** GID * pk[
'u2'] ** UID * pk['u3'] ** m * pk['u4'] ** r4) ** r3
c5 = b5 * pk['g'] ** r3
c6 = pk['u2'] ** UID * pk['u4'] ** r4
e1 = pk['g'] ** k
e2 = (pk['u0'] * pk['u1'] ** GID) ** k
e3 = pk['n'] ** UID * pair(pk['h1'], pk['g2']) ** k
f = pk['u0'] * pk['u1'] ** GID
gp = pair(pk['h1'], pk['g2'])
k1 = self.group.random(ZR)
k2 = self.group.random(ZR)
k3 = self.group.random(ZR)
r1 = pk['u2'] ** k1 * pk['u4'] ** k2
r2 = pk['g'] ** k3
r3 = f ** k3
t4 = pk['n'] ** k1 * gp ** k3
hashstr = str(r1) + str(r2) + str(r3) + str(t4)
c = self.group.hash(hashstr)
s1 = k1 + c * UID
s2 = k2 + c * r4
s3 = k3 + c * k
signature = {'c0': c0, 'c5': c5, 'c6': c6, 'e1': e1, 'e2': e2, 'e3':
e3, 'c': c, 's1': s1, 's2': s2, 's3': s3}
t2 = time()
with open('gssigntime.txt', 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
print('gs time', t2 - t1)
return signature
def open(self, okliststr, L, k):
t1 = time()
oklist = []
for ok in okliststr:
oklist.append({'ok1': self.group.fromstr(ok['ok1'], 10, GT),
'ok2': self.group.fromstr(ok['ok2'], 10, GT)})
ok1 = self.group.gen1_0(1)
ok2 = self.group.gen1_0(1)
for i in range(k):
ok1 = ok1 * oklist[i]['ok1'] ** L[i]
ok2 = ok2 * oklist[i]['ok2'] ** L[i]
t2 = time()
with open('opentime.txt', 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
print('open time', t2 - t1)
return ok1 / ok2
def get_usk(userID, GID, UID, h1str='', count=0):
pk = {}
for i in range(n):
vkliststr.append(clientlist[i].get_vk()['vk'])
vklist.append(group_signature.group.fromstr(vkliststr[i], 10, G1))
uskliststr.append(clientlist[i].user_extract(userID))
usklist.append({})
usklist[i]['b0'] = group_signature.group.fromstr(uskliststr[i]['b0'
], 10, G2)
usklist[i]['b3'] = group_signature.group.fromstr(uskliststr[i]['b3'
], 10, G2)
usklist[i]['b4'] = group_signature.group.fromstr(uskliststr[i]['b4'
], 10, G2)
usklist[i]['b5'] = group_signature.group.fromstr(uskliststr[i]['b5'
], 10, G1)
print(usklist[i])
if h1str == '' or h1str == '0' or h1str == 0:
h1str = clientlist[i].get_pk()['pk']
print('h1str', h1str)
pk = group_signature.pkGen(h1str)
print('pk---------------\n', pk)
if group_signature.verifyUsk(usklist[i], vklist[i], pk, GID, UID):
count = count + 1
else:
            print('key is invalid\n\n')
usk = group_signature.uskGen(usklist, pk, GID, UID, L, k)
print('usk---------------\n', usk)
return pk, usk
def get_lam(sig):
okliststr = []
i = 0
for client in clientlist:
okstr = client.get_ok(str(sig['e1']), str(sig['e2']))
print(okstr)
okliststr.append(okstr)
i = i + 1
if i < k:
print('the number of ok is not enough\n')
return
lam = group_signature.open(okliststr, L, k)
return lam
def tx_build_broad(op, steemd_instance, wallet_instance, account):
tx = TransactionBuilder(steemd_instance=steemd_instance,
wallet_instance=wallet_instance, no_broadcast=False)
tx.appendOps(op)
tx.appendSigner(account, 'posting')
tx.sign()
re = tx.broadcast()
return re
<|reserved_special_token_0|>
def annoy_commit(account, usk, pk, GID, UID, title='paper_title', body=
'paper_body', groupID='computer'):
annoy_author = 'nya'
sig = group_signature.sign(title, usk, pk, GID, UID, groupID)
permlink = ''.join(random.choices(string.digits, k=7))
print('permlink is ' + permlink)
op = operations.CommitPaper(**{'account': account, 'author':
annoy_author, 'permlink': permlink, 'title': title, 'body': body,
'json_metadata': '', 'c0': str(sig['c0']), 'c5': str(sig['c5']),
'c6': str(sig['c6']), 'e1': str(sig['e1']), 'e2': str(sig['e2']),
'e3': str(sig['e3']), 'c': str(sig['c']), 's1': str(sig['s1']),
's2': str(sig['s2']), 's3': str(sig['s3'])})
print('commitop', op)
return op, sig, permlink
<|reserved_special_token_0|>
def annoy_commit_tx(account, usk, pk, GID, UID, steemd_instance,
wallet_instance, title='paper_title', body='paper_body'):
commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID,
        title=title, body=body, groupID='computer')
re = tx_build_broad(commitop, steemd_instance, wallet_instance, account)
print('commit-re', re)
return ssig, permlink
<|reserved_special_token_0|>
def one_mul_annoy_tx(account, usk, pk, UID, steemd, wallet):
ssiglistone = []
permlinklistone = []
threads = []
for i in range(nodeTX):
t = MyThread(annoy_commit_tx, args=(account, usk, pk, GID, UID,
steemd, wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
ssig, permlink = t.get_result()
ssiglistone.append(ssig)
permlinklistone.append(permlink)
return ssiglistone, permlinklistone
def one_mul_open_tx(account, ssiglistone, userID, permlinklistone, steemd,
wallet):
threads = []
for i in range(nodeTX):
t = MyThread(open_tx, args=(account, ssiglistone[i], userID,
permlinklistone[i], steemd, wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
def mul_annoy_tx(usk, pk, UID):
ssiglist = []
permlinklist = []
threads = []
for i in range(n):
t = MyThread(one_mul_annoy_tx, args=(accountlist[i], usk, pk, UID,
clientlist[i].steemd, clientlist[i].wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
ssig, permlink = t.get_result()
ssiglist.append(ssig)
permlinklist.append(permlink)
return ssiglist, permlinklist
def mul_open_tx(ssiglist, permlinklist, userID):
threads = []
for i in range(n):
t = MyThread(one_mul_open_tx, args=(accountlist[i], ssiglist[i],
userID, permlinklist[i], clientlist[i].steemd, clientlist[i].
wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
def creat_commit_tx(account, usk, pk, GID, UID, steemd_instance,
wallet_instance, title='paper_title', body='paper_body'):
commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID,
title, body, groupID='computer')
commit_tx = tx_build(commitop, steemd_instance, wallet_instance, account)
return ssig, permlink, commit_tx
def creat_num_commit_tx(num, account, usk, pk, GID, UID, steemd_instance,
wallet_instance, ttitle='paper_title', tbody='paper_body'):
ssiglist = []
permlinklist = []
txlist = []
threads = []
for i in range(num):
t = MyThread(creat_commit_tx, args=(account, usk, pk, GID, UID,
steemd_instance, wallet_instance, ttitle, tbody))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
ssig, permlink, commit_tx = t.get_result()
ssiglist.append(ssig)
permlinklist.append(permlink)
txlist.append(commit_tx)
return ssiglist, permlinklist, txlist
def creat_open_tx(account, ssig, userID, permlink, steemd_instance,
wallet_instance):
openop = open_op(account, ssig, userID, permlink)
open_tx = tx_build(openop, steemd_instance, wallet_instance, account)
return open_tx
def creat_num_open_tx(num, account, ssiglist, userID, permlinklist,
steemd_instance, wallet_instance):
opentxlist = []
threads = []
for i in range(num):
t = MyThread(creat_open_tx, args=(account, ssiglist[i], userID,
permlinklist[i], steemd_instance, wallet_instance))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
opentx = t.get_result()
opentxlist.append(opentx)
return opentxlist
def tx_broad(tx):
tx.broadcast()
def mul_tx_broad(txlist):
threads = []
for tx in txlist:
t = MyThread(tx_broad, args=(tx,))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
<|reserved_special_token_0|>
def main():
userID = 'zhou'
UID = group_signature.group.hash(userID)
print('uid', UID)
pk, usk = get_usk(userID, GID, UID)
ssig, permlink = annoy_commit_tx(accountlist[0], usk, pk, GID, UID,
clientlist[0].steemd, clientlist[0].wallet, title='paper_title',
body='paper_body')
sleep(3)
open_tx(accountlist[0], ssig, userID, permlink, clientlist[0].steemd,
clientlist[0].wallet)
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import random
import string
import steembase
import struct
import steem
from time import sleep
from time import time
from steem.transactionbuilder import TransactionBuilder
from steembase import operations
from steembase.transactions import SignedTransaction
from resultthread import MyThread
from charm.toolbox.pairinggroup import PairingGroup, ZR, G1, G2, GT, pair
from charm.toolbox.secretutil import SecretUtil
class GroupSignature():
def __init__(self, groupObj):
global util, group
util = SecretUtil(groupObj, debug)
self.group = groupObj
def pkGen(self, h1str):
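        # Rebuild the group public key from hard-coded generator strings
        # (g, g2, u0..u4, h, n); only h1 varies and is parsed from h1str,
        # which is obtained from one of the key-issuing nodes.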
gstr = "[6172776968119684165170291368128433652817636448173749093457023424948260385279837018774774149930982188956916913145008943931711059687988096415181819433817738, 8687587692191287108886119971783525001480020593934954052605681527814232399216375005546606067382536684351686344089456732201641997200939472924879001214689004]"
g2str = "[7648994551207171188393784904797547917038803147671542540175090956205316897431443264058433935237605598252399113847934759009659621851760599508222321653067284, 922489308494109901795721463782161260386164061515796674638135394871842997698175772871045949554746517321480649326465484116060959631197509151923296896589720]"
u0str = "[180015966842918451436547451263180245588308971597733548673037049536176684754209695288737508087729924028686259002375511049961436438196866049956546630518033, 1295050197915669955783867959538729894307963685491173858450359845766785488725907727220684060845012524740394664162328817669422178637925195059862486690053923]"
u1str = "[2555472719769037960206282327195096320915753855199743796256065902544200822503613205017219993060986152240852358189992579821797745072366030183800897743028220, 7573705235093543416041007636313631591000596820214067724084077929638801811700093589294454562385664531190678890366928407286293582994146887505184778221562373]"
u2str = "[6876276970903121931083294698771200898345396507892092532649392211995185517437159402176975528760594250374462299539306423347676182899798006533425047523984724, 5323739238507219125881988073888745575030677585404965990610324901624530474522642705344792075909082041735695801098770187248023797265998906693745587936574078]"
u3str = "[6628726193389375981104409894060310698729022957801238449570622103067828518416602275957863668289683360250722835022304456841105526036470008237775051984811323, 862537748555943361001122447731987661405436458862545177179548603003392540530328380518694788420155531238391922289886044667763424887444361610972254938158280]"
u4str = "[8157254219580822599577995921928211211847392705248772673869189421041858895589817404931780741226510985762564598862965174380020566416411083236239871342674775, 4736677719200783513058679582227494204159737596114643136852532046080608159561620208171676599501713934575216178076006396924589443776642926902969084668055006]"
hstr = "[6248393417805371388321299785844751688345516419281230263497475615452026459314582553252281068616984105757749673095320346188725995701858182333525688832492249, 351368339412205819108519989143352052898751906937356995136442397753142226531384069336237369861919799955237545207977716196031001184146017796598836939617335]"
nstr = "[75201312764006187596691102237923705656296213254701583615255122742135170369075831428394751330697143847448434841509551532135632624530360013837581615049543, 3886258599652934715331576083899336629981754505948456216299528998628273512432828729344158706718479567056972375128622026273382126529171409058157562418608963]"
g = self.group.fromstr(gstr, 10, G1)
g2 = self.group.fromstr(g2str, 10, G2)
u0 = self.group.fromstr(u0str, 10, G2)
u1 = self.group.fromstr(u1str, 10, G2)
u2 = self.group.fromstr(u2str, 10, G2)
u3 = self.group.fromstr(u3str, 10, G2)
u4 = self.group.fromstr(u4str, 10, G2)
h = self.group.fromstr(hstr, 10, G1)
n = self.group.fromstr(nstr, 10, GT)
h1 = self.group.fromstr(h1str, 10, G1)
pk = {'g': g, 'g2': g2, 'u0': u0, 'u1': u1, 'u2': u2, 'u3': u3, 'u4': u4, 'h': h, 'n': n, 'h1': h1}
return pk
def uskGen(self, usklist, pk, GID, UID, L, k):
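        # Threshold-combine k user-key shares using the Lagrange coefficients L,
        # then re-randomize the combined key with a fresh exponent r2.
        # The elapsed extraction time is appended to extracttime.txt.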
t1 = time()
b0 = self.group.gen1_0(1)
b3 = self.group.gen1_0(1)
b4 = self.group.gen1_0(1)
b5 = self.group.gen1_0(1)
r2 = self.group.random(ZR)
for i in range(k):
b0 = b0 * (usklist[i]['b0'] ** L[i])
b3 = b3 * (usklist[i]['b3'] ** L[i])
b4 = b4 * (usklist[i]['b4'] ** L[i])
b5 = b5 * (usklist[i]['b5'] ** L[i])
b0 = b0 * (pk['u0'] * (pk['u1'] ** GID) * (pk['u2'] ** UID)) ** r2
b3 = b3 * (pk['u3'] ** r2)
b4 = b4 * (pk['u4'] ** r2)
b5 = b5 * (pk['g'] ** r2)
usk = {'b0': b0, 'b3': b3, 'b4': b4, 'b5': b5}
t2 = time()
with open("extracttime.txt", 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
return usk
def LGen(self, n, k):
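        # Precompute the Lagrange coefficients for share indices 1..n so that
        # any k of the n key shares can later be recombined.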
L = []
I = self.group.random(ZR)
J = self.group.random(ZR)
for i in range(n):
L.append(self.group.random(ZR))
L[i].set(1)
I.set(i + 1)
for j in range(1, k + 1):
print(j)
J.set(j)
if (i + 1) != j:
L[i] = L[i] * ((J) / (J - I))
return L
def verifyUsk(self, usk, vk, pk, GID, UID):
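        # Verify a user-key share against the issuing node's verification key vk
        # via three pairing equations; returns True only if all of them hold.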
g = pk['g']
g2 = pk['g2']
u0 = pk['u0']
u1 = pk['u1']
u2 = pk['u2']
u3 = pk['u3']
u4 = pk['u4']
b0 = usk['b0']
b5 = usk['b5']
b3 = usk['b3']
b4 = usk['b4']
        return (pair(g, b0) == pair(vk, g2) * pair(b5, u0) * pair(b5, u1 ** GID) * pair(b5, u2 ** UID)
                and pair(g, b3) == pair(b5, u3)
                and pair(g, b4) == pair(b5, u4))
def sign(self, title, usk, pk, GID, UID, groupID):
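        # Group-sign the paper title: hash the title, re-randomize the user key
        # into (c0, c5, c6), encrypt the signer identity into (e1, e2, e3) so it
        # can later be opened, and attach a proof of knowledge (c, s1, s2, s3).
        # The signing time is appended to gssigntime.txt.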
t1 = time()
m = self.group.hash(title)
b0 = usk['b0']
b3 = usk['b3']
b4 = usk['b4']
b5 = usk['b5']
r4 = self.group.random(ZR)
r3 = self.group.random(ZR)
k = self.group.random(ZR)
c0 = b0 * (b3 ** m) * (b4 ** r4) * (
(pk['u0'] * (pk['u1'] ** GID) * (pk['u2'] ** UID) * (pk['u3'] ** m) * (pk['u4'] ** r4)) ** r3)
c5 = b5 * (pk['g'] ** r3)
c6 = (pk['u2'] ** UID) * (pk['u4'] ** r4)
e1 = pk['g'] ** k
e2 = (pk['u0'] * (pk['u1'] ** GID)) ** k
e3 = (pk['n'] ** UID) * (pair(pk['h1'], pk['g2']) ** k)
        # generate the proof of knowledge (PoK)
f = pk['u0'] * (pk['u1'] ** GID)
gp = pair(pk['h1'], pk['g2'])
k1 = self.group.random(ZR)
k2 = self.group.random(ZR)
k3 = self.group.random(ZR)
r1 = (pk['u2'] ** k1) * (pk['u4'] ** k2)
r2 = pk['g'] ** k3
r3 = f ** k3
t4 = (pk['n'] ** k1) * (gp ** k3)
hashstr = str(r1) + str(r2) + str(r3) + str(t4)
c = self.group.hash(hashstr)
s1 = k1 + c * UID
s2 = k2 + c * r4
s3 = k3 + c * k
signature = {'c0': c0, 'c5': c5, 'c6': c6, 'e1': e1, 'e2': e2, 'e3': e3, 'c': c, 's1': s1, 's2': s2, 's3': s3}
t2 = time()
with open("gssigntime.txt", 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
print("gs time", t2 - t1)
return signature
def open(self, okliststr, L, k):
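        # Combine k opening shares with the Lagrange coefficients L and return
        # ok1 / ok2, the value lambda used to unblind e3 and reveal the signer.
        # The opening time is appended to opentime.txt.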
t1 = time()
oklist = []
for ok in okliststr:
oklist.append({'ok1': self.group.fromstr(ok['ok1'], 10, GT), 'ok2': self.group.fromstr(ok['ok2'], 10, GT)})
ok1 = self.group.gen1_0(1)
ok2 = self.group.gen1_0(1)
for i in range(k):
ok1 = ok1 * (oklist[i]['ok1'] ** L[i])
ok2 = ok2 * (oklist[i]['ok2'] ** L[i])
t2 = time()
with open("opentime.txt", 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
print("open time", t2 - t1)
return ok1 / ok2
def get_usk(userID, GID, UID, h1str="", count=0):
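    # Fetch each node's verification key and its share of the user key, verify
    # every share against the reconstructed pk, then combine k shares into the
    # final usk via group_signature.uskGen.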
pk = {}
for i in range(n):
vkliststr.append(clientlist[i].get_vk()['vk'])
vklist.append(group_signature.group.fromstr(vkliststr[i], 10, G1))
uskliststr.append(clientlist[i].user_extract(userID))
usklist.append({})
usklist[i]['b0'] = group_signature.group.fromstr(uskliststr[i]['b0'], 10, G2)
usklist[i]['b3'] = group_signature.group.fromstr(uskliststr[i]['b3'], 10, G2)
usklist[i]['b4'] = group_signature.group.fromstr(uskliststr[i]['b4'], 10, G2)
usklist[i]['b5'] = group_signature.group.fromstr(uskliststr[i]['b5'], 10, G1)
print(usklist[i])
if h1str == "" or h1str == "0" or h1str == 0:
h1str = clientlist[i].get_pk()['pk']
print("h1str", h1str)
pk = group_signature.pkGen(h1str)
print("pk---------------\n", pk)
if (group_signature.verifyUsk(usklist[i], vklist[i], pk, GID, UID)):
count = count + 1
else:
print("key is invalide\n\n")
usk = group_signature.uskGen(usklist, pk, GID, UID, L, k)
print("usk---------------\n", usk)
return pk, usk
def get_lam(sig):
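    # Collect opening shares for the signature's (e1, e2) from the nodes and
    # combine k of them into lambda via group_signature.open.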
okliststr = []
i = 0
for client in clientlist:
okstr = client.get_ok(str(sig['e1']), str(sig['e2']))
print(okstr)
okliststr.append(okstr)
i = i + 1
if i < k:
print("the number of ok is not enough\n")
return
lam = group_signature.open(okliststr, L, k)
return lam
def tx_build_broad(op, steemd_instance, wallet_instance, account):
tx = TransactionBuilder(steemd_instance=steemd_instance, wallet_instance=wallet_instance,
no_broadcast=False)
tx.appendOps(op)
tx.appendSigner(account, 'posting')
tx.sign()
# print("txsign",tx)
re = tx.broadcast()
return re
def tx_build(op, steemd_instance, wallet_instance, account):
tx = TransactionBuilder(steemd_instance=steemd_instance, wallet_instance=wallet_instance,
no_broadcast=False)
tx.appendOps(op)
tx.appendSigner(account, 'posting')
tx.sign()
# print("txsign",tx)
# re = tx.broadcast()
return tx
def annoy_commit(account, usk, pk, GID, UID, title="paper_title", body="paper_body", groupID="computer"):
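    # Build an anonymous CommitPaper operation: the title is group-signed, the
    # post is published under the fixed pseudonym 'nya', and a random 7-digit
    # permlink is generated for it.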
annoy_author = 'nya'
    # group signature ------ the title is required: it is hashed here and the hash is signed with usk
sig = group_signature.sign(title, usk, pk, GID, UID, groupID)
permlink = ''.join(random.choices(string.digits, k=7))
print("permlink is " + permlink)
op = operations.CommitPaper(
**{
"account": account,
"author": annoy_author,
"permlink": permlink,
"title": title,
"body": body,
"json_metadata": "",
"c0": str(sig['c0']),
"c5": str(sig['c5']),
"c6": str(sig['c6']),
"e1": str(sig['e1']),
"e2": str(sig['e2']),
"e3": str(sig['e3']),
"c": str(sig['c']),
"s1": str(sig['s1']),
"s2": str(sig['s2']),
"s3": str(sig['s3'])
}
)
print("commitop", op)
return op, sig, permlink
def open_op(account, sig, userID, permlink):
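    # Build an ApplyOpen operation that claims authorship: lambda is recomputed
    # from the opening shares and attached together with the claimed author userID.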
lam = get_lam(sig)
    # E = (pk['n'] ** UID) * lam  # recompute e3 of the signature and check that it matches
op = operations.ApplyOpen(
**{
'account': account,
'author': userID,
'lambda': str(lam),
'permlink': permlink,
'json_metadata': ""
}
)
return op
def annoy_commit_tx(account, usk, pk, GID, UID, steemd_instance, wallet_instance, title="paper_title",
body="paper_body"):
    commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID, title=title, body=body,
                                            groupID="computer")
re = tx_build_broad(commitop, steemd_instance, wallet_instance, account)
print("commit-re", re)
return ssig, permlink
def open_tx(account, ssig, userID, permlink, steemd_instance, wallet_instance):
openop = open_op(account, ssig, userID, permlink)
re = tx_build_broad(openop, steemd_instance, wallet_instance, account)
print("open-re", re)
# issue several transactions concurrently from a single node
def one_mul_annoy_tx(account, usk, pk, UID, steemd, wallet):
ssiglistone = []
permlinklistone = []
threads = []
for i in range(nodeTX):
t = MyThread(annoy_commit_tx, args=(account, usk, pk, GID, UID, steemd, wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
ssig, permlink = t.get_result()
ssiglistone.append(ssig)
permlinklistone.append(permlink)
return ssiglistone, permlinklistone
def one_mul_open_tx(account, ssiglistone, userID, permlinklistone, steemd, wallet):
threads = []
for i in range(nodeTX):
t = MyThread(open_tx,
args=(account, ssiglistone[i], userID, permlinklistone[i], steemd, wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
def mul_annoy_tx(usk, pk, UID):
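    # Fan out anonymous commit transactions: each of the n nodes runs
    # one_mul_annoy_tx in its own thread; the signatures and permlinks are
    # collected for the later open phase.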
ssiglist = []
permlinklist = []
threads = []
for i in range(n):
# t = MyThread(annoy_commit_tx, args=(accountlist[i], usk, pk, GID, UID, clientlist[i].steemd, clientlist[i].wallet))
t = MyThread(one_mul_annoy_tx,
args=(accountlist[i], usk, pk, UID, clientlist[i].steemd, clientlist[i].wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
ssig, permlink = t.get_result()
ssiglist.append(ssig)
permlinklist.append(permlink)
return ssiglist, permlinklist
# multiple nodes, each issuing transactions concurrently
def mul_open_tx(ssiglist, permlinklist, userID):
threads = []
for i in range(n):
# t = MyThread(open_tx,
# args=(accountlist[i], ssiglist[i], userID, permlinklist[i], clientlist[i].steemd, clientlist[i].wallet))
t = MyThread(one_mul_open_tx,
args=(
accountlist[i], ssiglist[i], userID, permlinklist[i], clientlist[i].steemd, clientlist[i].wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
# for t in threads:
# t.get_result()
# only build the tx, do not broadcast it
def creat_commit_tx(account, usk, pk, GID, UID, steemd_instance, wallet_instance, title="paper_title",
body="paper_body"):
commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID, title, body, groupID="computer")
commit_tx = tx_build(commitop, steemd_instance, wallet_instance, account)
return ssig, permlink, commit_tx
def creat_num_commit_tx(num, account, usk, pk, GID, UID, steemd_instance, wallet_instance, ttitle="paper_title",
tbody="paper_body"):
ssiglist = []
permlinklist = []
txlist = []
threads = []
for i in range(num):
t = MyThread(creat_commit_tx, args=(account, usk, pk, GID, UID, steemd_instance, wallet_instance, ttitle,
tbody))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
ssig, permlink, commit_tx = t.get_result()
ssiglist.append(ssig)
permlinklist.append(permlink)
txlist.append(commit_tx)
return ssiglist, permlinklist, txlist
def creat_open_tx(account, ssig, userID, permlink, steemd_instance, wallet_instance):
openop = open_op(account, ssig, userID, permlink)
open_tx = tx_build(openop, steemd_instance, wallet_instance, account)
return open_tx
def creat_num_open_tx(num, account, ssiglist, userID, permlinklist, steemd_instance, wallet_instance):
opentxlist = []
threads = []
for i in range(num):
t = MyThread(creat_open_tx,
args=(account, ssiglist[i], userID, permlinklist[i], steemd_instance,
wallet_instance))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
opentx = t.get_result()
opentxlist.append(opentx)
return opentxlist
def tx_broad(tx):
tx.broadcast()
def mul_tx_broad(txlist):
threads = []
for tx in txlist:
t = MyThread(tx_broad, args=(tx,))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
# public parameters
nodeTX = 5
k = 2
n = 3 # (k,n)
# node addresses
nodelist = [
'http://101.76.208.83:8090',
'http://101.76.208.83:8094',
'http://101.76.208.83:8098'
]
accountlist = ["initminer2", "zy1", "zy2", "zy3", "zy4", "zy5", "zy6", "zy7", "zy8", "zy9", "zy10", "zy11", "zy12",
"zy13", "zy14", "zy15", "zy16", "zy17", "zy18", "zy19", "zy20"]
# except for the first one, these are all the posting key 5Hs4jcm5X4sanCnUKNFCjrq2irN8sH1Krzsb13Qd6DHqutZbhqu
keylist = ['5J3yMruND2TADZ7cZc6Cnp4VePrnehei2wvGdnLgf3aEj2nDGhc', '5Hs4jcm5X4sanCnUKNFCjrq2irN8sH1Krzsb13Qd6DHqutZbhqu', "5KPLLsQ3MuWgKvNYqAFRjziWZenBqefDhSe4K1uYuj8hT3zQoKv"]
debug = True
# group signature setup
groupobj = PairingGroup('SS512')
group_signature = GroupSignature(groupobj)
L = group_signature.LGen(n, k)
# key material
clientlist = []
for i in range(n):
clientlist.append(steem.Steem(nodes=[nodelist[i]], keys=keylist[i]))
vkliststr = []
uskliststr = []
vklist = []
usklist = []
# steem testchain parameters
steembase.chains.known_chains['TEST'] = {
'chain_id': '18dcf0a285365fc58b71f18b3d3fec954aa0c141c44e4e5cb4cf777b9eab274e',
'prefix': 'TST', 'steem_symbol': 'TESTS', 'sbd_symbol': 'TBD', 'vests_symbol': 'VESTS'
}
groupID = "computer"
GID = group_signature.group.hash(groupID)
def main():
    # assume every node is available (node status cannot be checked)
userID = "zhou"
UID = group_signature.group.hash(userID)
print("uid", UID)
    # obtain the usk
pk, usk = get_usk(userID, GID, UID)
ssig, permlink = annoy_commit_tx(accountlist[0], usk, pk, GID, UID, clientlist[0].steemd, clientlist[0].wallet, title="paper_title",
body="paper_body")
sleep(3)
open_tx(accountlist[0], ssig, userID, permlink, clientlist[0].steemd, clientlist[0].wallet)
return
if __name__ == "__main__":
main()
print("end")
|
flexible
|
{
"blob_id": "a90b7e44cc54d4f96a13e5e6e2d15b632d3c4983",
"index": 290,
"step-1": "<mask token>\n\n\nclass GroupSignature:\n\n def __init__(self, groupObj):\n global util, group\n util = SecretUtil(groupObj, debug)\n self.group = groupObj\n\n def pkGen(self, h1str):\n gstr = (\n '[6172776968119684165170291368128433652817636448173749093457023424948260385279837018774774149930982188956916913145008943931711059687988096415181819433817738, 8687587692191287108886119971783525001480020593934954052605681527814232399216375005546606067382536684351686344089456732201641997200939472924879001214689004]'\n )\n g2str = (\n '[7648994551207171188393784904797547917038803147671542540175090956205316897431443264058433935237605598252399113847934759009659621851760599508222321653067284, 922489308494109901795721463782161260386164061515796674638135394871842997698175772871045949554746517321480649326465484116060959631197509151923296896589720]'\n )\n u0str = (\n '[180015966842918451436547451263180245588308971597733548673037049536176684754209695288737508087729924028686259002375511049961436438196866049956546630518033, 1295050197915669955783867959538729894307963685491173858450359845766785488725907727220684060845012524740394664162328817669422178637925195059862486690053923]'\n )\n u1str = (\n '[2555472719769037960206282327195096320915753855199743796256065902544200822503613205017219993060986152240852358189992579821797745072366030183800897743028220, 7573705235093543416041007636313631591000596820214067724084077929638801811700093589294454562385664531190678890366928407286293582994146887505184778221562373]'\n )\n u2str = (\n '[6876276970903121931083294698771200898345396507892092532649392211995185517437159402176975528760594250374462299539306423347676182899798006533425047523984724, 5323739238507219125881988073888745575030677585404965990610324901624530474522642705344792075909082041735695801098770187248023797265998906693745587936574078]'\n )\n u3str = (\n '[6628726193389375981104409894060310698729022957801238449570622103067828518416602275957863668289683360250722835022304456841105526036470008237775051984811323, 862537748555943361001122447731987661405436458862545177179548603003392540530328380518694788420155531238391922289886044667763424887444361610972254938158280]'\n )\n u4str = (\n '[8157254219580822599577995921928211211847392705248772673869189421041858895589817404931780741226510985762564598862965174380020566416411083236239871342674775, 4736677719200783513058679582227494204159737596114643136852532046080608159561620208171676599501713934575216178076006396924589443776642926902969084668055006]'\n )\n hstr = (\n '[6248393417805371388321299785844751688345516419281230263497475615452026459314582553252281068616984105757749673095320346188725995701858182333525688832492249, 351368339412205819108519989143352052898751906937356995136442397753142226531384069336237369861919799955237545207977716196031001184146017796598836939617335]'\n )\n nstr = (\n '[75201312764006187596691102237923705656296213254701583615255122742135170369075831428394751330697143847448434841509551532135632624530360013837581615049543, 3886258599652934715331576083899336629981754505948456216299528998628273512432828729344158706718479567056972375128622026273382126529171409058157562418608963]'\n )\n g = self.group.fromstr(gstr, 10, G1)\n g2 = self.group.fromstr(g2str, 10, G2)\n u0 = self.group.fromstr(u0str, 10, G2)\n u1 = self.group.fromstr(u1str, 10, G2)\n u2 = self.group.fromstr(u2str, 10, G2)\n u3 = self.group.fromstr(u3str, 10, G2)\n u4 = self.group.fromstr(u4str, 10, G2)\n h = self.group.fromstr(hstr, 10, G1)\n n = self.group.fromstr(nstr, 10, GT)\n h1 = 
self.group.fromstr(h1str, 10, G1)\n pk = {'g': g, 'g2': g2, 'u0': u0, 'u1': u1, 'u2': u2, 'u3': u3,\n 'u4': u4, 'h': h, 'n': n, 'h1': h1}\n return pk\n\n def uskGen(self, usklist, pk, GID, UID, L, k):\n t1 = time()\n b0 = self.group.gen1_0(1)\n b3 = self.group.gen1_0(1)\n b4 = self.group.gen1_0(1)\n b5 = self.group.gen1_0(1)\n r2 = self.group.random(ZR)\n for i in range(k):\n b0 = b0 * usklist[i]['b0'] ** L[i]\n b3 = b3 * usklist[i]['b3'] ** L[i]\n b4 = b4 * usklist[i]['b4'] ** L[i]\n b5 = b5 * usklist[i]['b5'] ** L[i]\n b0 = b0 * (pk['u0'] * pk['u1'] ** GID * pk['u2'] ** UID) ** r2\n b3 = b3 * pk['u3'] ** r2\n b4 = b4 * pk['u4'] ** r2\n b5 = b5 * pk['g'] ** r2\n usk = {'b0': b0, 'b3': b3, 'b4': b4, 'b5': b5}\n t2 = time()\n with open('extracttime.txt', 'a') as f:\n f.write(str(t2 - t1))\n f.write('\\n')\n return usk\n\n def LGen(self, n, k):\n L = []\n I = self.group.random(ZR)\n J = self.group.random(ZR)\n for i in range(n):\n L.append(self.group.random(ZR))\n L[i].set(1)\n I.set(i + 1)\n for j in range(1, k + 1):\n print(j)\n J.set(j)\n if i + 1 != j:\n L[i] = L[i] * (J / (J - I))\n return L\n\n def verifyUsk(self, usk, vk, pk, GID, UID):\n g = pk['g']\n g2 = pk['g2']\n u0 = pk['u0']\n u1 = pk['u1']\n u2 = pk['u2']\n u3 = pk['u3']\n u4 = pk['u4']\n b0 = usk['b0']\n b5 = usk['b5']\n b3 = usk['b3']\n b4 = usk['b4']\n return pair(g, b0) == pair(vk, g2) * pair(b5, u0) * pair(b5, u1 ** GID\n ) * pair(b5, u2 ** UID) and pair(g, b3) == pair(b5, u3) and pair(g,\n b4) == pair(b5, u4)\n\n def sign(self, title, usk, pk, GID, UID, groupID):\n t1 = time()\n m = self.group.hash(title)\n b0 = usk['b0']\n b3 = usk['b3']\n b4 = usk['b4']\n b5 = usk['b5']\n r4 = self.group.random(ZR)\n r3 = self.group.random(ZR)\n k = self.group.random(ZR)\n c0 = b0 * b3 ** m * b4 ** r4 * (pk['u0'] * pk['u1'] ** GID * pk[\n 'u2'] ** UID * pk['u3'] ** m * pk['u4'] ** r4) ** r3\n c5 = b5 * pk['g'] ** r3\n c6 = pk['u2'] ** UID * pk['u4'] ** r4\n e1 = pk['g'] ** k\n e2 = (pk['u0'] * pk['u1'] ** GID) ** k\n e3 = pk['n'] ** UID * pair(pk['h1'], pk['g2']) ** k\n f = pk['u0'] * pk['u1'] ** GID\n gp = pair(pk['h1'], pk['g2'])\n k1 = self.group.random(ZR)\n k2 = self.group.random(ZR)\n k3 = self.group.random(ZR)\n r1 = pk['u2'] ** k1 * pk['u4'] ** k2\n r2 = pk['g'] ** k3\n r3 = f ** k3\n t4 = pk['n'] ** k1 * gp ** k3\n hashstr = str(r1) + str(r2) + str(r3) + str(t4)\n c = self.group.hash(hashstr)\n s1 = k1 + c * UID\n s2 = k2 + c * r4\n s3 = k3 + c * k\n signature = {'c0': c0, 'c5': c5, 'c6': c6, 'e1': e1, 'e2': e2, 'e3':\n e3, 'c': c, 's1': s1, 's2': s2, 's3': s3}\n t2 = time()\n with open('gssigntime.txt', 'a') as f:\n f.write(str(t2 - t1))\n f.write('\\n')\n print('gs time', t2 - t1)\n return signature\n\n def open(self, okliststr, L, k):\n t1 = time()\n oklist = []\n for ok in okliststr:\n oklist.append({'ok1': self.group.fromstr(ok['ok1'], 10, GT),\n 'ok2': self.group.fromstr(ok['ok2'], 10, GT)})\n ok1 = self.group.gen1_0(1)\n ok2 = self.group.gen1_0(1)\n for i in range(k):\n ok1 = ok1 * oklist[i]['ok1'] ** L[i]\n ok2 = ok2 * oklist[i]['ok2'] ** L[i]\n t2 = time()\n with open('opentime.txt', 'a') as f:\n f.write(str(t2 - t1))\n f.write('\\n')\n print('open time', t2 - t1)\n return ok1 / ok2\n\n\n<mask token>\n\n\ndef creat_commit_tx(account, usk, pk, GID, UID, steemd_instance,\n wallet_instance, title='paper_title', body='paper_body'):\n commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID,\n title, body, groupID='computer')\n commit_tx = tx_build(commitop, steemd_instance, wallet_instance, account)\n return 
ssig, permlink, commit_tx\n\n\n<mask token>\n\n\ndef mul_tx_broad(txlist):\n threads = []\n for tx in txlist:\n t = MyThread(tx_broad, args=(tx,))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass GroupSignature:\n\n def __init__(self, groupObj):\n global util, group\n util = SecretUtil(groupObj, debug)\n self.group = groupObj\n\n def pkGen(self, h1str):\n gstr = (\n '[6172776968119684165170291368128433652817636448173749093457023424948260385279837018774774149930982188956916913145008943931711059687988096415181819433817738, 8687587692191287108886119971783525001480020593934954052605681527814232399216375005546606067382536684351686344089456732201641997200939472924879001214689004]'\n )\n g2str = (\n '[7648994551207171188393784904797547917038803147671542540175090956205316897431443264058433935237605598252399113847934759009659621851760599508222321653067284, 922489308494109901795721463782161260386164061515796674638135394871842997698175772871045949554746517321480649326465484116060959631197509151923296896589720]'\n )\n u0str = (\n '[180015966842918451436547451263180245588308971597733548673037049536176684754209695288737508087729924028686259002375511049961436438196866049956546630518033, 1295050197915669955783867959538729894307963685491173858450359845766785488725907727220684060845012524740394664162328817669422178637925195059862486690053923]'\n )\n u1str = (\n '[2555472719769037960206282327195096320915753855199743796256065902544200822503613205017219993060986152240852358189992579821797745072366030183800897743028220, 7573705235093543416041007636313631591000596820214067724084077929638801811700093589294454562385664531190678890366928407286293582994146887505184778221562373]'\n )\n u2str = (\n '[6876276970903121931083294698771200898345396507892092532649392211995185517437159402176975528760594250374462299539306423347676182899798006533425047523984724, 5323739238507219125881988073888745575030677585404965990610324901624530474522642705344792075909082041735695801098770187248023797265998906693745587936574078]'\n )\n u3str = (\n '[6628726193389375981104409894060310698729022957801238449570622103067828518416602275957863668289683360250722835022304456841105526036470008237775051984811323, 862537748555943361001122447731987661405436458862545177179548603003392540530328380518694788420155531238391922289886044667763424887444361610972254938158280]'\n )\n u4str = (\n '[8157254219580822599577995921928211211847392705248772673869189421041858895589817404931780741226510985762564598862965174380020566416411083236239871342674775, 4736677719200783513058679582227494204159737596114643136852532046080608159561620208171676599501713934575216178076006396924589443776642926902969084668055006]'\n )\n hstr = (\n '[6248393417805371388321299785844751688345516419281230263497475615452026459314582553252281068616984105757749673095320346188725995701858182333525688832492249, 351368339412205819108519989143352052898751906937356995136442397753142226531384069336237369861919799955237545207977716196031001184146017796598836939617335]'\n )\n nstr = (\n '[75201312764006187596691102237923705656296213254701583615255122742135170369075831428394751330697143847448434841509551532135632624530360013837581615049543, 3886258599652934715331576083899336629981754505948456216299528998628273512432828729344158706718479567056972375128622026273382126529171409058157562418608963]'\n )\n g = self.group.fromstr(gstr, 10, G1)\n g2 = self.group.fromstr(g2str, 10, G2)\n u0 = self.group.fromstr(u0str, 10, G2)\n u1 = self.group.fromstr(u1str, 10, G2)\n u2 = self.group.fromstr(u2str, 10, G2)\n u3 = self.group.fromstr(u3str, 10, G2)\n u4 = self.group.fromstr(u4str, 10, G2)\n h = self.group.fromstr(hstr, 10, G1)\n n = self.group.fromstr(nstr, 10, GT)\n h1 = 
self.group.fromstr(h1str, 10, G1)\n pk = {'g': g, 'g2': g2, 'u0': u0, 'u1': u1, 'u2': u2, 'u3': u3,\n 'u4': u4, 'h': h, 'n': n, 'h1': h1}\n return pk\n\n def uskGen(self, usklist, pk, GID, UID, L, k):\n t1 = time()\n b0 = self.group.gen1_0(1)\n b3 = self.group.gen1_0(1)\n b4 = self.group.gen1_0(1)\n b5 = self.group.gen1_0(1)\n r2 = self.group.random(ZR)\n for i in range(k):\n b0 = b0 * usklist[i]['b0'] ** L[i]\n b3 = b3 * usklist[i]['b3'] ** L[i]\n b4 = b4 * usklist[i]['b4'] ** L[i]\n b5 = b5 * usklist[i]['b5'] ** L[i]\n b0 = b0 * (pk['u0'] * pk['u1'] ** GID * pk['u2'] ** UID) ** r2\n b3 = b3 * pk['u3'] ** r2\n b4 = b4 * pk['u4'] ** r2\n b5 = b5 * pk['g'] ** r2\n usk = {'b0': b0, 'b3': b3, 'b4': b4, 'b5': b5}\n t2 = time()\n with open('extracttime.txt', 'a') as f:\n f.write(str(t2 - t1))\n f.write('\\n')\n return usk\n\n def LGen(self, n, k):\n L = []\n I = self.group.random(ZR)\n J = self.group.random(ZR)\n for i in range(n):\n L.append(self.group.random(ZR))\n L[i].set(1)\n I.set(i + 1)\n for j in range(1, k + 1):\n print(j)\n J.set(j)\n if i + 1 != j:\n L[i] = L[i] * (J / (J - I))\n return L\n\n def verifyUsk(self, usk, vk, pk, GID, UID):\n g = pk['g']\n g2 = pk['g2']\n u0 = pk['u0']\n u1 = pk['u1']\n u2 = pk['u2']\n u3 = pk['u3']\n u4 = pk['u4']\n b0 = usk['b0']\n b5 = usk['b5']\n b3 = usk['b3']\n b4 = usk['b4']\n return pair(g, b0) == pair(vk, g2) * pair(b5, u0) * pair(b5, u1 ** GID\n ) * pair(b5, u2 ** UID) and pair(g, b3) == pair(b5, u3) and pair(g,\n b4) == pair(b5, u4)\n\n def sign(self, title, usk, pk, GID, UID, groupID):\n t1 = time()\n m = self.group.hash(title)\n b0 = usk['b0']\n b3 = usk['b3']\n b4 = usk['b4']\n b5 = usk['b5']\n r4 = self.group.random(ZR)\n r3 = self.group.random(ZR)\n k = self.group.random(ZR)\n c0 = b0 * b3 ** m * b4 ** r4 * (pk['u0'] * pk['u1'] ** GID * pk[\n 'u2'] ** UID * pk['u3'] ** m * pk['u4'] ** r4) ** r3\n c5 = b5 * pk['g'] ** r3\n c6 = pk['u2'] ** UID * pk['u4'] ** r4\n e1 = pk['g'] ** k\n e2 = (pk['u0'] * pk['u1'] ** GID) ** k\n e3 = pk['n'] ** UID * pair(pk['h1'], pk['g2']) ** k\n f = pk['u0'] * pk['u1'] ** GID\n gp = pair(pk['h1'], pk['g2'])\n k1 = self.group.random(ZR)\n k2 = self.group.random(ZR)\n k3 = self.group.random(ZR)\n r1 = pk['u2'] ** k1 * pk['u4'] ** k2\n r2 = pk['g'] ** k3\n r3 = f ** k3\n t4 = pk['n'] ** k1 * gp ** k3\n hashstr = str(r1) + str(r2) + str(r3) + str(t4)\n c = self.group.hash(hashstr)\n s1 = k1 + c * UID\n s2 = k2 + c * r4\n s3 = k3 + c * k\n signature = {'c0': c0, 'c5': c5, 'c6': c6, 'e1': e1, 'e2': e2, 'e3':\n e3, 'c': c, 's1': s1, 's2': s2, 's3': s3}\n t2 = time()\n with open('gssigntime.txt', 'a') as f:\n f.write(str(t2 - t1))\n f.write('\\n')\n print('gs time', t2 - t1)\n return signature\n\n def open(self, okliststr, L, k):\n t1 = time()\n oklist = []\n for ok in okliststr:\n oklist.append({'ok1': self.group.fromstr(ok['ok1'], 10, GT),\n 'ok2': self.group.fromstr(ok['ok2'], 10, GT)})\n ok1 = self.group.gen1_0(1)\n ok2 = self.group.gen1_0(1)\n for i in range(k):\n ok1 = ok1 * oklist[i]['ok1'] ** L[i]\n ok2 = ok2 * oklist[i]['ok2'] ** L[i]\n t2 = time()\n with open('opentime.txt', 'a') as f:\n f.write(str(t2 - t1))\n f.write('\\n')\n print('open time', t2 - t1)\n return ok1 / ok2\n\n\n<mask token>\n\n\ndef get_lam(sig):\n okliststr = []\n i = 0\n for client in clientlist:\n okstr = client.get_ok(str(sig['e1']), str(sig['e2']))\n print(okstr)\n okliststr.append(okstr)\n i = i + 1\n if i < k:\n print('the number of ok is not enough\\n')\n return\n lam = group_signature.open(okliststr, L, k)\n return lam\n\n\ndef 
tx_build_broad(op, steemd_instance, wallet_instance, account):\n tx = TransactionBuilder(steemd_instance=steemd_instance,\n wallet_instance=wallet_instance, no_broadcast=False)\n tx.appendOps(op)\n tx.appendSigner(account, 'posting')\n tx.sign()\n re = tx.broadcast()\n return re\n\n\n<mask token>\n\n\ndef annoy_commit_tx(account, usk, pk, GID, UID, steemd_instance,\n wallet_instance, title='paper_title', body='paper_body'):\n commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID,\n title='paper_title', body='paper_body', groupID='computer')\n re = tx_build_broad(commitop, steemd_instance, wallet_instance, account)\n print('commit-re', re)\n return ssig, permlink\n\n\n<mask token>\n\n\ndef one_mul_annoy_tx(account, usk, pk, UID, steemd, wallet):\n ssiglistone = []\n permlinklistone = []\n threads = []\n for i in range(nodeTX):\n t = MyThread(annoy_commit_tx, args=(account, usk, pk, GID, UID,\n steemd, wallet))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n for t in threads:\n ssig, permlink = t.get_result()\n ssiglistone.append(ssig)\n permlinklistone.append(permlink)\n return ssiglistone, permlinklistone\n\n\ndef one_mul_open_tx(account, ssiglistone, userID, permlinklistone, steemd,\n wallet):\n threads = []\n for i in range(nodeTX):\n t = MyThread(open_tx, args=(account, ssiglistone[i], userID,\n permlinklistone[i], steemd, wallet))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n\n<mask token>\n\n\ndef creat_commit_tx(account, usk, pk, GID, UID, steemd_instance,\n wallet_instance, title='paper_title', body='paper_body'):\n commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID,\n title, body, groupID='computer')\n commit_tx = tx_build(commitop, steemd_instance, wallet_instance, account)\n return ssig, permlink, commit_tx\n\n\ndef creat_num_commit_tx(num, account, usk, pk, GID, UID, steemd_instance,\n wallet_instance, ttitle='paper_title', tbody='paper_body'):\n ssiglist = []\n permlinklist = []\n txlist = []\n threads = []\n for i in range(num):\n t = MyThread(creat_commit_tx, args=(account, usk, pk, GID, UID,\n steemd_instance, wallet_instance, ttitle, tbody))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n for t in threads:\n ssig, permlink, commit_tx = t.get_result()\n ssiglist.append(ssig)\n permlinklist.append(permlink)\n txlist.append(commit_tx)\n return ssiglist, permlinklist, txlist\n\n\ndef creat_open_tx(account, ssig, userID, permlink, steemd_instance,\n wallet_instance):\n openop = open_op(account, ssig, userID, permlink)\n open_tx = tx_build(openop, steemd_instance, wallet_instance, account)\n return open_tx\n\n\ndef creat_num_open_tx(num, account, ssiglist, userID, permlinklist,\n steemd_instance, wallet_instance):\n opentxlist = []\n threads = []\n for i in range(num):\n t = MyThread(creat_open_tx, args=(account, ssiglist[i], userID,\n permlinklist[i], steemd_instance, wallet_instance))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n for t in threads:\n opentx = t.get_result()\n opentxlist.append(opentx)\n return opentxlist\n\n\ndef tx_broad(tx):\n tx.broadcast()\n\n\ndef mul_tx_broad(txlist):\n threads = []\n for tx in txlist:\n t = MyThread(tx_broad, args=(tx,))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass GroupSignature:\n\n def __init__(self, groupObj):\n global util, group\n util = SecretUtil(groupObj, debug)\n self.group = groupObj\n\n def pkGen(self, h1str):\n gstr = (\n '[6172776968119684165170291368128433652817636448173749093457023424948260385279837018774774149930982188956916913145008943931711059687988096415181819433817738, 8687587692191287108886119971783525001480020593934954052605681527814232399216375005546606067382536684351686344089456732201641997200939472924879001214689004]'\n )\n g2str = (\n '[7648994551207171188393784904797547917038803147671542540175090956205316897431443264058433935237605598252399113847934759009659621851760599508222321653067284, 922489308494109901795721463782161260386164061515796674638135394871842997698175772871045949554746517321480649326465484116060959631197509151923296896589720]'\n )\n u0str = (\n '[180015966842918451436547451263180245588308971597733548673037049536176684754209695288737508087729924028686259002375511049961436438196866049956546630518033, 1295050197915669955783867959538729894307963685491173858450359845766785488725907727220684060845012524740394664162328817669422178637925195059862486690053923]'\n )\n u1str = (\n '[2555472719769037960206282327195096320915753855199743796256065902544200822503613205017219993060986152240852358189992579821797745072366030183800897743028220, 7573705235093543416041007636313631591000596820214067724084077929638801811700093589294454562385664531190678890366928407286293582994146887505184778221562373]'\n )\n u2str = (\n '[6876276970903121931083294698771200898345396507892092532649392211995185517437159402176975528760594250374462299539306423347676182899798006533425047523984724, 5323739238507219125881988073888745575030677585404965990610324901624530474522642705344792075909082041735695801098770187248023797265998906693745587936574078]'\n )\n u3str = (\n '[6628726193389375981104409894060310698729022957801238449570622103067828518416602275957863668289683360250722835022304456841105526036470008237775051984811323, 862537748555943361001122447731987661405436458862545177179548603003392540530328380518694788420155531238391922289886044667763424887444361610972254938158280]'\n )\n u4str = (\n '[8157254219580822599577995921928211211847392705248772673869189421041858895589817404931780741226510985762564598862965174380020566416411083236239871342674775, 4736677719200783513058679582227494204159737596114643136852532046080608159561620208171676599501713934575216178076006396924589443776642926902969084668055006]'\n )\n hstr = (\n '[6248393417805371388321299785844751688345516419281230263497475615452026459314582553252281068616984105757749673095320346188725995701858182333525688832492249, 351368339412205819108519989143352052898751906937356995136442397753142226531384069336237369861919799955237545207977716196031001184146017796598836939617335]'\n )\n nstr = (\n '[75201312764006187596691102237923705656296213254701583615255122742135170369075831428394751330697143847448434841509551532135632624530360013837581615049543, 3886258599652934715331576083899336629981754505948456216299528998628273512432828729344158706718479567056972375128622026273382126529171409058157562418608963]'\n )\n g = self.group.fromstr(gstr, 10, G1)\n g2 = self.group.fromstr(g2str, 10, G2)\n u0 = self.group.fromstr(u0str, 10, G2)\n u1 = self.group.fromstr(u1str, 10, G2)\n u2 = self.group.fromstr(u2str, 10, G2)\n u3 = self.group.fromstr(u3str, 10, G2)\n u4 = self.group.fromstr(u4str, 10, G2)\n h = self.group.fromstr(hstr, 10, G1)\n n = self.group.fromstr(nstr, 10, GT)\n h1 = 
self.group.fromstr(h1str, 10, G1)\n pk = {'g': g, 'g2': g2, 'u0': u0, 'u1': u1, 'u2': u2, 'u3': u3,\n 'u4': u4, 'h': h, 'n': n, 'h1': h1}\n return pk\n\n def uskGen(self, usklist, pk, GID, UID, L, k):\n t1 = time()\n b0 = self.group.gen1_0(1)\n b3 = self.group.gen1_0(1)\n b4 = self.group.gen1_0(1)\n b5 = self.group.gen1_0(1)\n r2 = self.group.random(ZR)\n for i in range(k):\n b0 = b0 * usklist[i]['b0'] ** L[i]\n b3 = b3 * usklist[i]['b3'] ** L[i]\n b4 = b4 * usklist[i]['b4'] ** L[i]\n b5 = b5 * usklist[i]['b5'] ** L[i]\n b0 = b0 * (pk['u0'] * pk['u1'] ** GID * pk['u2'] ** UID) ** r2\n b3 = b3 * pk['u3'] ** r2\n b4 = b4 * pk['u4'] ** r2\n b5 = b5 * pk['g'] ** r2\n usk = {'b0': b0, 'b3': b3, 'b4': b4, 'b5': b5}\n t2 = time()\n with open('extracttime.txt', 'a') as f:\n f.write(str(t2 - t1))\n f.write('\\n')\n return usk\n\n def LGen(self, n, k):\n L = []\n I = self.group.random(ZR)\n J = self.group.random(ZR)\n for i in range(n):\n L.append(self.group.random(ZR))\n L[i].set(1)\n I.set(i + 1)\n for j in range(1, k + 1):\n print(j)\n J.set(j)\n if i + 1 != j:\n L[i] = L[i] * (J / (J - I))\n return L\n\n def verifyUsk(self, usk, vk, pk, GID, UID):\n g = pk['g']\n g2 = pk['g2']\n u0 = pk['u0']\n u1 = pk['u1']\n u2 = pk['u2']\n u3 = pk['u3']\n u4 = pk['u4']\n b0 = usk['b0']\n b5 = usk['b5']\n b3 = usk['b3']\n b4 = usk['b4']\n return pair(g, b0) == pair(vk, g2) * pair(b5, u0) * pair(b5, u1 ** GID\n ) * pair(b5, u2 ** UID) and pair(g, b3) == pair(b5, u3) and pair(g,\n b4) == pair(b5, u4)\n\n def sign(self, title, usk, pk, GID, UID, groupID):\n t1 = time()\n m = self.group.hash(title)\n b0 = usk['b0']\n b3 = usk['b3']\n b4 = usk['b4']\n b5 = usk['b5']\n r4 = self.group.random(ZR)\n r3 = self.group.random(ZR)\n k = self.group.random(ZR)\n c0 = b0 * b3 ** m * b4 ** r4 * (pk['u0'] * pk['u1'] ** GID * pk[\n 'u2'] ** UID * pk['u3'] ** m * pk['u4'] ** r4) ** r3\n c5 = b5 * pk['g'] ** r3\n c6 = pk['u2'] ** UID * pk['u4'] ** r4\n e1 = pk['g'] ** k\n e2 = (pk['u0'] * pk['u1'] ** GID) ** k\n e3 = pk['n'] ** UID * pair(pk['h1'], pk['g2']) ** k\n f = pk['u0'] * pk['u1'] ** GID\n gp = pair(pk['h1'], pk['g2'])\n k1 = self.group.random(ZR)\n k2 = self.group.random(ZR)\n k3 = self.group.random(ZR)\n r1 = pk['u2'] ** k1 * pk['u4'] ** k2\n r2 = pk['g'] ** k3\n r3 = f ** k3\n t4 = pk['n'] ** k1 * gp ** k3\n hashstr = str(r1) + str(r2) + str(r3) + str(t4)\n c = self.group.hash(hashstr)\n s1 = k1 + c * UID\n s2 = k2 + c * r4\n s3 = k3 + c * k\n signature = {'c0': c0, 'c5': c5, 'c6': c6, 'e1': e1, 'e2': e2, 'e3':\n e3, 'c': c, 's1': s1, 's2': s2, 's3': s3}\n t2 = time()\n with open('gssigntime.txt', 'a') as f:\n f.write(str(t2 - t1))\n f.write('\\n')\n print('gs time', t2 - t1)\n return signature\n\n def open(self, okliststr, L, k):\n t1 = time()\n oklist = []\n for ok in okliststr:\n oklist.append({'ok1': self.group.fromstr(ok['ok1'], 10, GT),\n 'ok2': self.group.fromstr(ok['ok2'], 10, GT)})\n ok1 = self.group.gen1_0(1)\n ok2 = self.group.gen1_0(1)\n for i in range(k):\n ok1 = ok1 * oklist[i]['ok1'] ** L[i]\n ok2 = ok2 * oklist[i]['ok2'] ** L[i]\n t2 = time()\n with open('opentime.txt', 'a') as f:\n f.write(str(t2 - t1))\n f.write('\\n')\n print('open time', t2 - t1)\n return ok1 / ok2\n\n\n<mask token>\n\n\ndef get_lam(sig):\n okliststr = []\n i = 0\n for client in clientlist:\n okstr = client.get_ok(str(sig['e1']), str(sig['e2']))\n print(okstr)\n okliststr.append(okstr)\n i = i + 1\n if i < k:\n print('the number of ok is not enough\\n')\n return\n lam = group_signature.open(okliststr, L, k)\n return lam\n\n\ndef 
tx_build_broad(op, steemd_instance, wallet_instance, account):\n tx = TransactionBuilder(steemd_instance=steemd_instance,\n wallet_instance=wallet_instance, no_broadcast=False)\n tx.appendOps(op)\n tx.appendSigner(account, 'posting')\n tx.sign()\n re = tx.broadcast()\n return re\n\n\n<mask token>\n\n\ndef annoy_commit(account, usk, pk, GID, UID, title='paper_title', body=\n 'paper_body', groupID='computer'):\n annoy_author = 'nya'\n sig = group_signature.sign(title, usk, pk, GID, UID, groupID)\n permlink = ''.join(random.choices(string.digits, k=7))\n print('permlink is ' + permlink)\n op = operations.CommitPaper(**{'account': account, 'author':\n annoy_author, 'permlink': permlink, 'title': title, 'body': body,\n 'json_metadata': '', 'c0': str(sig['c0']), 'c5': str(sig['c5']),\n 'c6': str(sig['c6']), 'e1': str(sig['e1']), 'e2': str(sig['e2']),\n 'e3': str(sig['e3']), 'c': str(sig['c']), 's1': str(sig['s1']),\n 's2': str(sig['s2']), 's3': str(sig['s3'])})\n print('commitop', op)\n return op, sig, permlink\n\n\n<mask token>\n\n\ndef annoy_commit_tx(account, usk, pk, GID, UID, steemd_instance,\n wallet_instance, title='paper_title', body='paper_body'):\n commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID,\n title='paper_title', body='paper_body', groupID='computer')\n re = tx_build_broad(commitop, steemd_instance, wallet_instance, account)\n print('commit-re', re)\n return ssig, permlink\n\n\n<mask token>\n\n\ndef one_mul_annoy_tx(account, usk, pk, UID, steemd, wallet):\n ssiglistone = []\n permlinklistone = []\n threads = []\n for i in range(nodeTX):\n t = MyThread(annoy_commit_tx, args=(account, usk, pk, GID, UID,\n steemd, wallet))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n for t in threads:\n ssig, permlink = t.get_result()\n ssiglistone.append(ssig)\n permlinklistone.append(permlink)\n return ssiglistone, permlinklistone\n\n\ndef one_mul_open_tx(account, ssiglistone, userID, permlinklistone, steemd,\n wallet):\n threads = []\n for i in range(nodeTX):\n t = MyThread(open_tx, args=(account, ssiglistone[i], userID,\n permlinklistone[i], steemd, wallet))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n\ndef mul_annoy_tx(usk, pk, UID):\n ssiglist = []\n permlinklist = []\n threads = []\n for i in range(n):\n t = MyThread(one_mul_annoy_tx, args=(accountlist[i], usk, pk, UID,\n clientlist[i].steemd, clientlist[i].wallet))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n for t in threads:\n ssig, permlink = t.get_result()\n ssiglist.append(ssig)\n permlinklist.append(permlink)\n return ssiglist, permlinklist\n\n\ndef mul_open_tx(ssiglist, permlinklist, userID):\n threads = []\n for i in range(n):\n t = MyThread(one_mul_open_tx, args=(accountlist[i], ssiglist[i],\n userID, permlinklist[i], clientlist[i].steemd, clientlist[i].\n wallet))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n\ndef creat_commit_tx(account, usk, pk, GID, UID, steemd_instance,\n wallet_instance, title='paper_title', body='paper_body'):\n commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID,\n title, body, groupID='computer')\n commit_tx = tx_build(commitop, steemd_instance, wallet_instance, account)\n return ssig, permlink, commit_tx\n\n\ndef creat_num_commit_tx(num, account, usk, pk, GID, UID, steemd_instance,\n wallet_instance, ttitle='paper_title', tbody='paper_body'):\n ssiglist = []\n permlinklist = []\n txlist = []\n threads = []\n for 
i in range(num):\n t = MyThread(creat_commit_tx, args=(account, usk, pk, GID, UID,\n steemd_instance, wallet_instance, ttitle, tbody))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n for t in threads:\n ssig, permlink, commit_tx = t.get_result()\n ssiglist.append(ssig)\n permlinklist.append(permlink)\n txlist.append(commit_tx)\n return ssiglist, permlinklist, txlist\n\n\ndef creat_open_tx(account, ssig, userID, permlink, steemd_instance,\n wallet_instance):\n openop = open_op(account, ssig, userID, permlink)\n open_tx = tx_build(openop, steemd_instance, wallet_instance, account)\n return open_tx\n\n\ndef creat_num_open_tx(num, account, ssiglist, userID, permlinklist,\n steemd_instance, wallet_instance):\n opentxlist = []\n threads = []\n for i in range(num):\n t = MyThread(creat_open_tx, args=(account, ssiglist[i], userID,\n permlinklist[i], steemd_instance, wallet_instance))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n for t in threads:\n opentx = t.get_result()\n opentxlist.append(opentx)\n return opentxlist\n\n\ndef tx_broad(tx):\n tx.broadcast()\n\n\ndef mul_tx_broad(txlist):\n threads = []\n for tx in txlist:\n t = MyThread(tx_broad, args=(tx,))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass GroupSignature:\n\n def __init__(self, groupObj):\n global util, group\n util = SecretUtil(groupObj, debug)\n self.group = groupObj\n\n def pkGen(self, h1str):\n gstr = (\n '[6172776968119684165170291368128433652817636448173749093457023424948260385279837018774774149930982188956916913145008943931711059687988096415181819433817738, 8687587692191287108886119971783525001480020593934954052605681527814232399216375005546606067382536684351686344089456732201641997200939472924879001214689004]'\n )\n g2str = (\n '[7648994551207171188393784904797547917038803147671542540175090956205316897431443264058433935237605598252399113847934759009659621851760599508222321653067284, 922489308494109901795721463782161260386164061515796674638135394871842997698175772871045949554746517321480649326465484116060959631197509151923296896589720]'\n )\n u0str = (\n '[180015966842918451436547451263180245588308971597733548673037049536176684754209695288737508087729924028686259002375511049961436438196866049956546630518033, 1295050197915669955783867959538729894307963685491173858450359845766785488725907727220684060845012524740394664162328817669422178637925195059862486690053923]'\n )\n u1str = (\n '[2555472719769037960206282327195096320915753855199743796256065902544200822503613205017219993060986152240852358189992579821797745072366030183800897743028220, 7573705235093543416041007636313631591000596820214067724084077929638801811700093589294454562385664531190678890366928407286293582994146887505184778221562373]'\n )\n u2str = (\n '[6876276970903121931083294698771200898345396507892092532649392211995185517437159402176975528760594250374462299539306423347676182899798006533425047523984724, 5323739238507219125881988073888745575030677585404965990610324901624530474522642705344792075909082041735695801098770187248023797265998906693745587936574078]'\n )\n u3str = (\n '[6628726193389375981104409894060310698729022957801238449570622103067828518416602275957863668289683360250722835022304456841105526036470008237775051984811323, 862537748555943361001122447731987661405436458862545177179548603003392540530328380518694788420155531238391922289886044667763424887444361610972254938158280]'\n )\n u4str = (\n '[8157254219580822599577995921928211211847392705248772673869189421041858895589817404931780741226510985762564598862965174380020566416411083236239871342674775, 4736677719200783513058679582227494204159737596114643136852532046080608159561620208171676599501713934575216178076006396924589443776642926902969084668055006]'\n )\n hstr = (\n '[6248393417805371388321299785844751688345516419281230263497475615452026459314582553252281068616984105757749673095320346188725995701858182333525688832492249, 351368339412205819108519989143352052898751906937356995136442397753142226531384069336237369861919799955237545207977716196031001184146017796598836939617335]'\n )\n nstr = (\n '[75201312764006187596691102237923705656296213254701583615255122742135170369075831428394751330697143847448434841509551532135632624530360013837581615049543, 3886258599652934715331576083899336629981754505948456216299528998628273512432828729344158706718479567056972375128622026273382126529171409058157562418608963]'\n )\n g = self.group.fromstr(gstr, 10, G1)\n g2 = self.group.fromstr(g2str, 10, G2)\n u0 = self.group.fromstr(u0str, 10, G2)\n u1 = self.group.fromstr(u1str, 10, G2)\n u2 = self.group.fromstr(u2str, 10, G2)\n u3 = self.group.fromstr(u3str, 10, G2)\n u4 = self.group.fromstr(u4str, 10, G2)\n h = self.group.fromstr(hstr, 10, G1)\n n = self.group.fromstr(nstr, 10, GT)\n h1 = 
self.group.fromstr(h1str, 10, G1)\n pk = {'g': g, 'g2': g2, 'u0': u0, 'u1': u1, 'u2': u2, 'u3': u3,\n 'u4': u4, 'h': h, 'n': n, 'h1': h1}\n return pk\n\n def uskGen(self, usklist, pk, GID, UID, L, k):\n t1 = time()\n b0 = self.group.gen1_0(1)\n b3 = self.group.gen1_0(1)\n b4 = self.group.gen1_0(1)\n b5 = self.group.gen1_0(1)\n r2 = self.group.random(ZR)\n for i in range(k):\n b0 = b0 * usklist[i]['b0'] ** L[i]\n b3 = b3 * usklist[i]['b3'] ** L[i]\n b4 = b4 * usklist[i]['b4'] ** L[i]\n b5 = b5 * usklist[i]['b5'] ** L[i]\n b0 = b0 * (pk['u0'] * pk['u1'] ** GID * pk['u2'] ** UID) ** r2\n b3 = b3 * pk['u3'] ** r2\n b4 = b4 * pk['u4'] ** r2\n b5 = b5 * pk['g'] ** r2\n usk = {'b0': b0, 'b3': b3, 'b4': b4, 'b5': b5}\n t2 = time()\n with open('extracttime.txt', 'a') as f:\n f.write(str(t2 - t1))\n f.write('\\n')\n return usk\n\n def LGen(self, n, k):\n L = []\n I = self.group.random(ZR)\n J = self.group.random(ZR)\n for i in range(n):\n L.append(self.group.random(ZR))\n L[i].set(1)\n I.set(i + 1)\n for j in range(1, k + 1):\n print(j)\n J.set(j)\n if i + 1 != j:\n L[i] = L[i] * (J / (J - I))\n return L\n\n def verifyUsk(self, usk, vk, pk, GID, UID):\n g = pk['g']\n g2 = pk['g2']\n u0 = pk['u0']\n u1 = pk['u1']\n u2 = pk['u2']\n u3 = pk['u3']\n u4 = pk['u4']\n b0 = usk['b0']\n b5 = usk['b5']\n b3 = usk['b3']\n b4 = usk['b4']\n return pair(g, b0) == pair(vk, g2) * pair(b5, u0) * pair(b5, u1 ** GID\n ) * pair(b5, u2 ** UID) and pair(g, b3) == pair(b5, u3) and pair(g,\n b4) == pair(b5, u4)\n\n def sign(self, title, usk, pk, GID, UID, groupID):\n t1 = time()\n m = self.group.hash(title)\n b0 = usk['b0']\n b3 = usk['b3']\n b4 = usk['b4']\n b5 = usk['b5']\n r4 = self.group.random(ZR)\n r3 = self.group.random(ZR)\n k = self.group.random(ZR)\n c0 = b0 * b3 ** m * b4 ** r4 * (pk['u0'] * pk['u1'] ** GID * pk[\n 'u2'] ** UID * pk['u3'] ** m * pk['u4'] ** r4) ** r3\n c5 = b5 * pk['g'] ** r3\n c6 = pk['u2'] ** UID * pk['u4'] ** r4\n e1 = pk['g'] ** k\n e2 = (pk['u0'] * pk['u1'] ** GID) ** k\n e3 = pk['n'] ** UID * pair(pk['h1'], pk['g2']) ** k\n f = pk['u0'] * pk['u1'] ** GID\n gp = pair(pk['h1'], pk['g2'])\n k1 = self.group.random(ZR)\n k2 = self.group.random(ZR)\n k3 = self.group.random(ZR)\n r1 = pk['u2'] ** k1 * pk['u4'] ** k2\n r2 = pk['g'] ** k3\n r3 = f ** k3\n t4 = pk['n'] ** k1 * gp ** k3\n hashstr = str(r1) + str(r2) + str(r3) + str(t4)\n c = self.group.hash(hashstr)\n s1 = k1 + c * UID\n s2 = k2 + c * r4\n s3 = k3 + c * k\n signature = {'c0': c0, 'c5': c5, 'c6': c6, 'e1': e1, 'e2': e2, 'e3':\n e3, 'c': c, 's1': s1, 's2': s2, 's3': s3}\n t2 = time()\n with open('gssigntime.txt', 'a') as f:\n f.write(str(t2 - t1))\n f.write('\\n')\n print('gs time', t2 - t1)\n return signature\n\n def open(self, okliststr, L, k):\n t1 = time()\n oklist = []\n for ok in okliststr:\n oklist.append({'ok1': self.group.fromstr(ok['ok1'], 10, GT),\n 'ok2': self.group.fromstr(ok['ok2'], 10, GT)})\n ok1 = self.group.gen1_0(1)\n ok2 = self.group.gen1_0(1)\n for i in range(k):\n ok1 = ok1 * oklist[i]['ok1'] ** L[i]\n ok2 = ok2 * oklist[i]['ok2'] ** L[i]\n t2 = time()\n with open('opentime.txt', 'a') as f:\n f.write(str(t2 - t1))\n f.write('\\n')\n print('open time', t2 - t1)\n return ok1 / ok2\n\n\ndef get_usk(userID, GID, UID, h1str='', count=0):\n pk = {}\n for i in range(n):\n vkliststr.append(clientlist[i].get_vk()['vk'])\n vklist.append(group_signature.group.fromstr(vkliststr[i], 10, G1))\n uskliststr.append(clientlist[i].user_extract(userID))\n usklist.append({})\n usklist[i]['b0'] = 
group_signature.group.fromstr(uskliststr[i]['b0'\n ], 10, G2)\n usklist[i]['b3'] = group_signature.group.fromstr(uskliststr[i]['b3'\n ], 10, G2)\n usklist[i]['b4'] = group_signature.group.fromstr(uskliststr[i]['b4'\n ], 10, G2)\n usklist[i]['b5'] = group_signature.group.fromstr(uskliststr[i]['b5'\n ], 10, G1)\n print(usklist[i])\n if h1str == '' or h1str == '0' or h1str == 0:\n h1str = clientlist[i].get_pk()['pk']\n print('h1str', h1str)\n pk = group_signature.pkGen(h1str)\n print('pk---------------\\n', pk)\n if group_signature.verifyUsk(usklist[i], vklist[i], pk, GID, UID):\n count = count + 1\n else:\n print('key is invalide\\n\\n')\n usk = group_signature.uskGen(usklist, pk, GID, UID, L, k)\n print('usk---------------\\n', usk)\n return pk, usk\n\n\ndef get_lam(sig):\n okliststr = []\n i = 0\n for client in clientlist:\n okstr = client.get_ok(str(sig['e1']), str(sig['e2']))\n print(okstr)\n okliststr.append(okstr)\n i = i + 1\n if i < k:\n print('the number of ok is not enough\\n')\n return\n lam = group_signature.open(okliststr, L, k)\n return lam\n\n\ndef tx_build_broad(op, steemd_instance, wallet_instance, account):\n tx = TransactionBuilder(steemd_instance=steemd_instance,\n wallet_instance=wallet_instance, no_broadcast=False)\n tx.appendOps(op)\n tx.appendSigner(account, 'posting')\n tx.sign()\n re = tx.broadcast()\n return re\n\n\n<mask token>\n\n\ndef annoy_commit(account, usk, pk, GID, UID, title='paper_title', body=\n 'paper_body', groupID='computer'):\n annoy_author = 'nya'\n sig = group_signature.sign(title, usk, pk, GID, UID, groupID)\n permlink = ''.join(random.choices(string.digits, k=7))\n print('permlink is ' + permlink)\n op = operations.CommitPaper(**{'account': account, 'author':\n annoy_author, 'permlink': permlink, 'title': title, 'body': body,\n 'json_metadata': '', 'c0': str(sig['c0']), 'c5': str(sig['c5']),\n 'c6': str(sig['c6']), 'e1': str(sig['e1']), 'e2': str(sig['e2']),\n 'e3': str(sig['e3']), 'c': str(sig['c']), 's1': str(sig['s1']),\n 's2': str(sig['s2']), 's3': str(sig['s3'])})\n print('commitop', op)\n return op, sig, permlink\n\n\n<mask token>\n\n\ndef annoy_commit_tx(account, usk, pk, GID, UID, steemd_instance,\n wallet_instance, title='paper_title', body='paper_body'):\n commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID,\n title='paper_title', body='paper_body', groupID='computer')\n re = tx_build_broad(commitop, steemd_instance, wallet_instance, account)\n print('commit-re', re)\n return ssig, permlink\n\n\n<mask token>\n\n\ndef one_mul_annoy_tx(account, usk, pk, UID, steemd, wallet):\n ssiglistone = []\n permlinklistone = []\n threads = []\n for i in range(nodeTX):\n t = MyThread(annoy_commit_tx, args=(account, usk, pk, GID, UID,\n steemd, wallet))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n for t in threads:\n ssig, permlink = t.get_result()\n ssiglistone.append(ssig)\n permlinklistone.append(permlink)\n return ssiglistone, permlinklistone\n\n\ndef one_mul_open_tx(account, ssiglistone, userID, permlinklistone, steemd,\n wallet):\n threads = []\n for i in range(nodeTX):\n t = MyThread(open_tx, args=(account, ssiglistone[i], userID,\n permlinklistone[i], steemd, wallet))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n\ndef mul_annoy_tx(usk, pk, UID):\n ssiglist = []\n permlinklist = []\n threads = []\n for i in range(n):\n t = MyThread(one_mul_annoy_tx, args=(accountlist[i], usk, pk, UID,\n clientlist[i].steemd, clientlist[i].wallet))\n 
threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n for t in threads:\n ssig, permlink = t.get_result()\n ssiglist.append(ssig)\n permlinklist.append(permlink)\n return ssiglist, permlinklist\n\n\ndef mul_open_tx(ssiglist, permlinklist, userID):\n threads = []\n for i in range(n):\n t = MyThread(one_mul_open_tx, args=(accountlist[i], ssiglist[i],\n userID, permlinklist[i], clientlist[i].steemd, clientlist[i].\n wallet))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n\ndef creat_commit_tx(account, usk, pk, GID, UID, steemd_instance,\n wallet_instance, title='paper_title', body='paper_body'):\n commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID,\n title, body, groupID='computer')\n commit_tx = tx_build(commitop, steemd_instance, wallet_instance, account)\n return ssig, permlink, commit_tx\n\n\ndef creat_num_commit_tx(num, account, usk, pk, GID, UID, steemd_instance,\n wallet_instance, ttitle='paper_title', tbody='paper_body'):\n ssiglist = []\n permlinklist = []\n txlist = []\n threads = []\n for i in range(num):\n t = MyThread(creat_commit_tx, args=(account, usk, pk, GID, UID,\n steemd_instance, wallet_instance, ttitle, tbody))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n for t in threads:\n ssig, permlink, commit_tx = t.get_result()\n ssiglist.append(ssig)\n permlinklist.append(permlink)\n txlist.append(commit_tx)\n return ssiglist, permlinklist, txlist\n\n\ndef creat_open_tx(account, ssig, userID, permlink, steemd_instance,\n wallet_instance):\n openop = open_op(account, ssig, userID, permlink)\n open_tx = tx_build(openop, steemd_instance, wallet_instance, account)\n return open_tx\n\n\ndef creat_num_open_tx(num, account, ssiglist, userID, permlinklist,\n steemd_instance, wallet_instance):\n opentxlist = []\n threads = []\n for i in range(num):\n t = MyThread(creat_open_tx, args=(account, ssiglist[i], userID,\n permlinklist[i], steemd_instance, wallet_instance))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n for t in threads:\n opentx = t.get_result()\n opentxlist.append(opentx)\n return opentxlist\n\n\ndef tx_broad(tx):\n tx.broadcast()\n\n\ndef mul_tx_broad(txlist):\n threads = []\n for tx in txlist:\n t = MyThread(tx_broad, args=(tx,))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n\n<mask token>\n\n\ndef main():\n userID = 'zhou'\n UID = group_signature.group.hash(userID)\n print('uid', UID)\n pk, usk = get_usk(userID, GID, UID)\n ssig, permlink = annoy_commit_tx(accountlist[0], usk, pk, GID, UID,\n clientlist[0].steemd, clientlist[0].wallet, title='paper_title',\n body='paper_body')\n sleep(3)\n open_tx(accountlist[0], ssig, userID, permlink, clientlist[0].steemd,\n clientlist[0].wallet)\n return\n\n\n<mask token>\n",
"step-5": "import random\nimport string\nimport steembase\nimport struct\nimport steem\nfrom time import sleep\nfrom time import time\nfrom steem.transactionbuilder import TransactionBuilder\nfrom steembase import operations\nfrom steembase.transactions import SignedTransaction\nfrom resultthread import MyThread\nfrom charm.toolbox.pairinggroup import PairingGroup, ZR, G1, G2, GT, pair\nfrom charm.toolbox.secretutil import SecretUtil\n\n\nclass GroupSignature():\n\n def __init__(self, groupObj):\n global util, group\n util = SecretUtil(groupObj, debug)\n self.group = groupObj\n\n def pkGen(self, h1str):\n gstr = \"[6172776968119684165170291368128433652817636448173749093457023424948260385279837018774774149930982188956916913145008943931711059687988096415181819433817738, 8687587692191287108886119971783525001480020593934954052605681527814232399216375005546606067382536684351686344089456732201641997200939472924879001214689004]\"\n g2str = \"[7648994551207171188393784904797547917038803147671542540175090956205316897431443264058433935237605598252399113847934759009659621851760599508222321653067284, 922489308494109901795721463782161260386164061515796674638135394871842997698175772871045949554746517321480649326465484116060959631197509151923296896589720]\"\n u0str = \"[180015966842918451436547451263180245588308971597733548673037049536176684754209695288737508087729924028686259002375511049961436438196866049956546630518033, 1295050197915669955783867959538729894307963685491173858450359845766785488725907727220684060845012524740394664162328817669422178637925195059862486690053923]\"\n u1str = \"[2555472719769037960206282327195096320915753855199743796256065902544200822503613205017219993060986152240852358189992579821797745072366030183800897743028220, 7573705235093543416041007636313631591000596820214067724084077929638801811700093589294454562385664531190678890366928407286293582994146887505184778221562373]\"\n u2str = \"[6876276970903121931083294698771200898345396507892092532649392211995185517437159402176975528760594250374462299539306423347676182899798006533425047523984724, 5323739238507219125881988073888745575030677585404965990610324901624530474522642705344792075909082041735695801098770187248023797265998906693745587936574078]\"\n u3str = \"[6628726193389375981104409894060310698729022957801238449570622103067828518416602275957863668289683360250722835022304456841105526036470008237775051984811323, 862537748555943361001122447731987661405436458862545177179548603003392540530328380518694788420155531238391922289886044667763424887444361610972254938158280]\"\n u4str = \"[8157254219580822599577995921928211211847392705248772673869189421041858895589817404931780741226510985762564598862965174380020566416411083236239871342674775, 4736677719200783513058679582227494204159737596114643136852532046080608159561620208171676599501713934575216178076006396924589443776642926902969084668055006]\"\n hstr = \"[6248393417805371388321299785844751688345516419281230263497475615452026459314582553252281068616984105757749673095320346188725995701858182333525688832492249, 351368339412205819108519989143352052898751906937356995136442397753142226531384069336237369861919799955237545207977716196031001184146017796598836939617335]\"\n nstr = \"[75201312764006187596691102237923705656296213254701583615255122742135170369075831428394751330697143847448434841509551532135632624530360013837581615049543, 3886258599652934715331576083899336629981754505948456216299528998628273512432828729344158706718479567056972375128622026273382126529171409058157562418608963]\"\n\n g = 
self.group.fromstr(gstr, 10, G1)\n g2 = self.group.fromstr(g2str, 10, G2)\n u0 = self.group.fromstr(u0str, 10, G2)\n u1 = self.group.fromstr(u1str, 10, G2)\n u2 = self.group.fromstr(u2str, 10, G2)\n u3 = self.group.fromstr(u3str, 10, G2)\n u4 = self.group.fromstr(u4str, 10, G2)\n h = self.group.fromstr(hstr, 10, G1)\n n = self.group.fromstr(nstr, 10, GT)\n h1 = self.group.fromstr(h1str, 10, G1)\n\n pk = {'g': g, 'g2': g2, 'u0': u0, 'u1': u1, 'u2': u2, 'u3': u3, 'u4': u4, 'h': h, 'n': n, 'h1': h1}\n\n return pk\n\n def uskGen(self, usklist, pk, GID, UID, L, k):\n t1 = time()\n b0 = self.group.gen1_0(1)\n b3 = self.group.gen1_0(1)\n b4 = self.group.gen1_0(1)\n b5 = self.group.gen1_0(1)\n\n r2 = self.group.random(ZR)\n\n for i in range(k):\n b0 = b0 * (usklist[i]['b0'] ** L[i])\n b3 = b3 * (usklist[i]['b3'] ** L[i])\n b4 = b4 * (usklist[i]['b4'] ** L[i])\n b5 = b5 * (usklist[i]['b5'] ** L[i])\n\n b0 = b0 * (pk['u0'] * (pk['u1'] ** GID) * (pk['u2'] ** UID)) ** r2\n b3 = b3 * (pk['u3'] ** r2)\n b4 = b4 * (pk['u4'] ** r2)\n b5 = b5 * (pk['g'] ** r2)\n\n usk = {'b0': b0, 'b3': b3, 'b4': b4, 'b5': b5}\n t2 = time()\n with open(\"extracttime.txt\", 'a') as f:\n f.write(str(t2 - t1))\n f.write('\\n')\n return usk\n\n def LGen(self, n, k):\n L = []\n I = self.group.random(ZR)\n J = self.group.random(ZR)\n for i in range(n):\n L.append(self.group.random(ZR))\n L[i].set(1)\n I.set(i + 1)\n for j in range(1, k + 1):\n print(j)\n J.set(j)\n if (i + 1) != j:\n L[i] = L[i] * ((J) / (J - I))\n return L\n\n def verifyUsk(self, usk, vk, pk, GID, UID):\n g = pk['g']\n g2 = pk['g2']\n u0 = pk['u0']\n u1 = pk['u1']\n u2 = pk['u2']\n u3 = pk['u3']\n u4 = pk['u4']\n\n b0 = usk['b0']\n b5 = usk['b5']\n b3 = usk['b3']\n b4 = usk['b4']\n\n return pair(g, b0) == (pair(vk, g2) * pair(b5, u0) * pair(b5, u1 ** GID) * pair(b5, u2 ** UID)) and pair(g,\n b3) == pair(\n b5, u3) and pair(g, b4) == pair(b5, u4)\n\n def sign(self, title, usk, pk, GID, UID, groupID):\n t1 = time()\n m = self.group.hash(title)\n b0 = usk['b0']\n b3 = usk['b3']\n b4 = usk['b4']\n b5 = usk['b5']\n\n r4 = self.group.random(ZR)\n r3 = self.group.random(ZR)\n k = self.group.random(ZR)\n\n c0 = b0 * (b3 ** m) * (b4 ** r4) * (\n (pk['u0'] * (pk['u1'] ** GID) * (pk['u2'] ** UID) * (pk['u3'] ** m) * (pk['u4'] ** r4)) ** r3)\n c5 = b5 * (pk['g'] ** r3)\n c6 = (pk['u2'] ** UID) * (pk['u4'] ** r4)\n e1 = pk['g'] ** k\n e2 = (pk['u0'] * (pk['u1'] ** GID)) ** k\n e3 = (pk['n'] ** UID) * (pair(pk['h1'], pk['g2']) ** k)\n\n # 产生pok\n f = pk['u0'] * (pk['u1'] ** GID)\n gp = pair(pk['h1'], pk['g2'])\n\n k1 = self.group.random(ZR)\n k2 = self.group.random(ZR)\n k3 = self.group.random(ZR)\n\n r1 = (pk['u2'] ** k1) * (pk['u4'] ** k2)\n r2 = pk['g'] ** k3\n r3 = f ** k3\n t4 = (pk['n'] ** k1) * (gp ** k3)\n\n hashstr = str(r1) + str(r2) + str(r3) + str(t4)\n\n c = self.group.hash(hashstr)\n\n s1 = k1 + c * UID\n\n s2 = k2 + c * r4\n\n s3 = k3 + c * k\n\n signature = {'c0': c0, 'c5': c5, 'c6': c6, 'e1': e1, 'e2': e2, 'e3': e3, 'c': c, 's1': s1, 's2': s2, 's3': s3}\n t2 = time()\n with open(\"gssigntime.txt\", 'a') as f:\n f.write(str(t2 - t1))\n f.write('\\n')\n print(\"gs time\", t2 - t1)\n return signature\n\n def open(self, okliststr, L, k):\n t1 = time()\n oklist = []\n for ok in okliststr:\n oklist.append({'ok1': self.group.fromstr(ok['ok1'], 10, GT), 'ok2': self.group.fromstr(ok['ok2'], 10, GT)})\n ok1 = self.group.gen1_0(1)\n ok2 = self.group.gen1_0(1)\n for i in range(k):\n ok1 = ok1 * (oklist[i]['ok1'] ** L[i])\n ok2 = ok2 * (oklist[i]['ok2'] ** L[i])\n t2 = 
time()\n with open(\"opentime.txt\", 'a') as f:\n f.write(str(t2 - t1))\n f.write('\\n')\n print(\"open time\", t2 - t1)\n return ok1 / ok2\n\n\ndef get_usk(userID, GID, UID, h1str=\"\", count=0):\n pk = {}\n for i in range(n):\n vkliststr.append(clientlist[i].get_vk()['vk'])\n vklist.append(group_signature.group.fromstr(vkliststr[i], 10, G1))\n\n uskliststr.append(clientlist[i].user_extract(userID))\n usklist.append({})\n usklist[i]['b0'] = group_signature.group.fromstr(uskliststr[i]['b0'], 10, G2)\n usklist[i]['b3'] = group_signature.group.fromstr(uskliststr[i]['b3'], 10, G2)\n usklist[i]['b4'] = group_signature.group.fromstr(uskliststr[i]['b4'], 10, G2)\n usklist[i]['b5'] = group_signature.group.fromstr(uskliststr[i]['b5'], 10, G1)\n print(usklist[i])\n if h1str == \"\" or h1str == \"0\" or h1str == 0:\n h1str = clientlist[i].get_pk()['pk']\n print(\"h1str\", h1str)\n pk = group_signature.pkGen(h1str)\n print(\"pk---------------\\n\", pk)\n\n if (group_signature.verifyUsk(usklist[i], vklist[i], pk, GID, UID)):\n count = count + 1\n else:\n print(\"key is invalide\\n\\n\")\n usk = group_signature.uskGen(usklist, pk, GID, UID, L, k)\n\n print(\"usk---------------\\n\", usk)\n return pk, usk\n\n\ndef get_lam(sig):\n okliststr = []\n i = 0\n for client in clientlist:\n okstr = client.get_ok(str(sig['e1']), str(sig['e2']))\n print(okstr)\n okliststr.append(okstr)\n i = i + 1\n\n if i < k:\n print(\"the number of ok is not enough\\n\")\n return\n\n lam = group_signature.open(okliststr, L, k)\n return lam\n\n\ndef tx_build_broad(op, steemd_instance, wallet_instance, account):\n tx = TransactionBuilder(steemd_instance=steemd_instance, wallet_instance=wallet_instance,\n no_broadcast=False)\n tx.appendOps(op)\n tx.appendSigner(account, 'posting')\n tx.sign()\n # print(\"txsign\",tx)\n re = tx.broadcast()\n return re\n\n\ndef tx_build(op, steemd_instance, wallet_instance, account):\n tx = TransactionBuilder(steemd_instance=steemd_instance, wallet_instance=wallet_instance,\n no_broadcast=False)\n tx.appendOps(op)\n tx.appendSigner(account, 'posting')\n tx.sign()\n # print(\"txsign\",tx)\n # re = tx.broadcast()\n return tx\n\n\ndef annoy_commit(account, usk, pk, GID, UID, title=\"paper_title\", body=\"paper_body\", groupID=\"computer\"):\n annoy_author = 'nya'\n # group signature ------title 必须 这里面是对title进行hash 然后使用usk对hash进行签名\n sig = group_signature.sign(title, usk, pk, GID, UID, groupID)\n\n permlink = ''.join(random.choices(string.digits, k=7))\n print(\"permlink is \" + permlink)\n op = operations.CommitPaper(\n **{\n \"account\": account,\n \"author\": annoy_author,\n \"permlink\": permlink,\n \"title\": title,\n \"body\": body,\n \"json_metadata\": \"\",\n \"c0\": str(sig['c0']),\n \"c5\": str(sig['c5']),\n \"c6\": str(sig['c6']),\n \"e1\": str(sig['e1']),\n \"e2\": str(sig['e2']),\n \"e3\": str(sig['e3']),\n \"c\": str(sig['c']),\n \"s1\": str(sig['s1']),\n \"s2\": str(sig['s2']),\n \"s3\": str(sig['s3'])\n }\n )\n print(\"commitop\", op)\n return op, sig, permlink\n\n\ndef open_op(account, sig, userID, permlink):\n lam = get_lam(sig)\n # E = (pk['n'] ** UID) * lam #计算出e3 即签名的e3 判断是否相等\n op = operations.ApplyOpen(\n **{\n 'account': account,\n 'author': userID,\n 'lambda': str(lam),\n 'permlink': permlink,\n 'json_metadata': \"\"\n }\n )\n return op\n\n\ndef annoy_commit_tx(account, usk, pk, GID, UID, steemd_instance, wallet_instance, title=\"paper_title\",\n body=\"paper_body\"):\n commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID, title=\"paper_title\", 
body=\"paper_body\",\n groupID=\"computer\")\n re = tx_build_broad(commitop, steemd_instance, wallet_instance, account)\n print(\"commit-re\", re)\n return ssig, permlink\n\n\ndef open_tx(account, ssig, userID, permlink, steemd_instance, wallet_instance):\n openop = open_op(account, ssig, userID, permlink)\n re = tx_build_broad(openop, steemd_instance, wallet_instance, account)\n print(\"open-re\", re)\n\n\n# 一个节点的 并发产生交易\ndef one_mul_annoy_tx(account, usk, pk, UID, steemd, wallet):\n ssiglistone = []\n permlinklistone = []\n threads = []\n for i in range(nodeTX):\n t = MyThread(annoy_commit_tx, args=(account, usk, pk, GID, UID, steemd, wallet))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n for t in threads:\n ssig, permlink = t.get_result()\n ssiglistone.append(ssig)\n permlinklistone.append(permlink)\n return ssiglistone, permlinklistone\n\n\ndef one_mul_open_tx(account, ssiglistone, userID, permlinklistone, steemd, wallet):\n threads = []\n for i in range(nodeTX):\n t = MyThread(open_tx,\n args=(account, ssiglistone[i], userID, permlinklistone[i], steemd, wallet))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n\ndef mul_annoy_tx(usk, pk, UID):\n ssiglist = []\n permlinklist = []\n threads = []\n for i in range(n):\n # t = MyThread(annoy_commit_tx, args=(accountlist[i], usk, pk, GID, UID, clientlist[i].steemd, clientlist[i].wallet))\n t = MyThread(one_mul_annoy_tx,\n args=(accountlist[i], usk, pk, UID, clientlist[i].steemd, clientlist[i].wallet))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n for t in threads:\n ssig, permlink = t.get_result()\n ssiglist.append(ssig)\n permlinklist.append(permlink)\n return ssiglist, permlinklist\n\n\n# 多个节点, 每个节点并发\ndef mul_open_tx(ssiglist, permlinklist, userID):\n threads = []\n for i in range(n):\n # t = MyThread(open_tx,\n # args=(accountlist[i], ssiglist[i], userID, permlinklist[i], clientlist[i].steemd, clientlist[i].wallet))\n t = MyThread(one_mul_open_tx,\n args=(\n accountlist[i], ssiglist[i], userID, permlinklist[i], clientlist[i].steemd, clientlist[i].wallet))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n # for t in threads:\n # t.get_result()\n\n\n# 仅创造tx 不广播\ndef creat_commit_tx(account, usk, pk, GID, UID, steemd_instance, wallet_instance, title=\"paper_title\",\n body=\"paper_body\"):\n commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID, title, body, groupID=\"computer\")\n commit_tx = tx_build(commitop, steemd_instance, wallet_instance, account)\n return ssig, permlink, commit_tx\n\n\ndef creat_num_commit_tx(num, account, usk, pk, GID, UID, steemd_instance, wallet_instance, ttitle=\"paper_title\",\n tbody=\"paper_body\"):\n ssiglist = []\n permlinklist = []\n txlist = []\n threads = []\n for i in range(num):\n t = MyThread(creat_commit_tx, args=(account, usk, pk, GID, UID, steemd_instance, wallet_instance, ttitle,\n tbody))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n for t in threads:\n ssig, permlink, commit_tx = t.get_result()\n ssiglist.append(ssig)\n permlinklist.append(permlink)\n txlist.append(commit_tx)\n return ssiglist, permlinklist, txlist\n\n\ndef creat_open_tx(account, ssig, userID, permlink, steemd_instance, wallet_instance):\n openop = open_op(account, ssig, userID, permlink)\n open_tx = tx_build(openop, steemd_instance, wallet_instance, account)\n return open_tx\n\n\ndef creat_num_open_tx(num, account, ssiglist, 
userID, permlinklist, steemd_instance, wallet_instance):\n opentxlist = []\n threads = []\n for i in range(num):\n t = MyThread(creat_open_tx,\n args=(account, ssiglist[i], userID, permlinklist[i], steemd_instance,\n wallet_instance))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n for t in threads:\n opentx = t.get_result()\n opentxlist.append(opentx)\n return opentxlist\n\n\ndef tx_broad(tx):\n tx.broadcast()\n\n\ndef mul_tx_broad(txlist):\n threads = []\n for tx in txlist:\n t = MyThread(tx_broad, args=(tx,))\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n\n# public parma\nnodeTX = 5\nk = 2\nn = 3 # (k,n)\n# 节点地址\nnodelist = [\n 'http://101.76.208.83:8090',\n 'http://101.76.208.83:8094',\n 'http://101.76.208.83:8098'\n\n]\naccountlist = [\"initminer2\", \"zy1\", \"zy2\", \"zy3\", \"zy4\", \"zy5\", \"zy6\", \"zy7\", \"zy8\", \"zy9\", \"zy10\", \"zy11\", \"zy12\",\n \"zy13\", \"zy14\", \"zy15\", \"zy16\", \"zy17\", \"zy18\", \"zy19\", \"zy20\"]\n# 除了第一个 其他的都是posting key 5Hs4jcm5X4sanCnUKNFCjrq2irN8sH1Krzsb13Qd6DHqutZbhqu\nkeylist = ['5J3yMruND2TADZ7cZc6Cnp4VePrnehei2wvGdnLgf3aEj2nDGhc', '5Hs4jcm5X4sanCnUKNFCjrq2irN8sH1Krzsb13Qd6DHqutZbhqu', \"5KPLLsQ3MuWgKvNYqAFRjziWZenBqefDhSe4K1uYuj8hT3zQoKv\"]\ndebug = True\n# 群签名相关\ngroupobj = PairingGroup('SS512')\ngroup_signature = GroupSignature(groupobj)\nL = group_signature.LGen(n, k)\n# 密钥相关\nclientlist = []\nfor i in range(n):\n clientlist.append(steem.Steem(nodes=[nodelist[i]], keys=keylist[i]))\n\nvkliststr = []\nuskliststr = []\nvklist = []\nusklist = []\n# steem testchain信息\nsteembase.chains.known_chains['TEST'] = {\n 'chain_id': '18dcf0a285365fc58b71f18b3d3fec954aa0c141c44e4e5cb4cf777b9eab274e',\n 'prefix': 'TST', 'steem_symbol': 'TESTS', 'sbd_symbol': 'TBD', 'vests_symbol': 'VESTS'\n}\ngroupID = \"computer\"\nGID = group_signature.group.hash(groupID)\n\n\ndef main():\n # 假设不存在不可用节点(无法判断节点状态)\n userID = \"zhou\"\n UID = group_signature.group.hash(userID)\n print(\"uid\", UID)\n # 获取usk\n pk, usk = get_usk(userID, GID, UID)\n\n ssig, permlink = annoy_commit_tx(accountlist[0], usk, pk, GID, UID, clientlist[0].steemd, clientlist[0].wallet, title=\"paper_title\",\n body=\"paper_body\")\n sleep(3)\n open_tx(accountlist[0], ssig, userID, permlink, clientlist[0].steemd, clientlist[0].wallet)\n return\n\n\nif __name__ == \"__main__\":\n main()\n\nprint(\"end\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
10,
19,
22,
24,
31
]
}
|
[
10,
19,
22,
24,
31
] |
import numpy as np
import cv2
import skimage.color
import skimage.filters
import skimage.io
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import pickle
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import check_random_state
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization, Conv2DTranspose, Activation,\
Concatenate
from keras.losses import sparse_categorical_crossentropy
from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam
from keras.models import load_model, Model
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, LearningRateScheduler
from preprocess_data import get_data
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.regularizers import l2
from keras.utils import to_categorical
import keras.metrics
from sklearn.utils import class_weight
from utils import scheduler
image_size = 256
method = 0
batch_size = 8
METRICS = [
keras.metrics.TruePositives(name='tp'),
keras.metrics.FalsePositives(name='fp'),
keras.metrics.TrueNegatives(name='tn'),
keras.metrics.FalseNegatives(name='fn'),
keras.metrics.BinaryAccuracy(name='accuracy'),
keras.metrics.Precision(name='precision'),
keras.metrics.Recall(name='recall'),
keras.metrics.AUC(name='auc'),
]
#get_data(save_data=True, method=method)
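# Load the preprocessed image arrays and label vectors saved as .npy files (presumably produced by get_data above).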
X_Train = np.load('data/X_train_256_GRY.npy')
X_Val = np.load('data/X_val_256_GRY.npy')
X_Test = np.load('data/X_test_256_GRY.npy')
Y_Train = np.load('data/Y_train.npy')
Y_Val = np.load('data/Y_val.npy')
Y_Test = np.load('data/Y_test.npy')
print("Train Benign: " + str(np.count_nonzero(Y_Train == 0)))
print("Train Malignant: " + str(np.count_nonzero(Y_Train == 1)))
print("Test Benign: " + str(np.count_nonzero(Y_Test == 0)))
print("Test Malignant: " + str(np.count_nonzero(Y_Test == 1)))
print("X_Train shape: " + str(X_Train.shape))
print("Y_Train shape: " + str(Y_Train.shape))
print("X_Test shape: " + str(X_Test.shape))
print("Y_Test shape: " + str(Y_Test.shape))
print("X_Val shape: " + str(X_Val.shape))
print("Y_Val shape: " + str(Y_Val.shape))
batches_per_epoch = int(X_Train.shape[0] / batch_size)
print("batches_per_epoch= " + str(batches_per_epoch))
val_batches_per_epoch = int(X_Val.shape[0] / batch_size)
print("validation batches_per_epoch= " + str(val_batches_per_epoch))
print("Steps per epoch: ", batches_per_epoch)
epoch_count = 25
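# Weight the malignant class (1) twice as heavily as the benign class (0), presumably to counter class imbalance.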
class_weights = {0: 0.5, 1: 1.0}
# Data augmentation
train_generator = ImageDataGenerator(
preprocessing_function=preprocess_input,
rotation_range=180,
shear_range=15,
zoom_range=0.2,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
vertical_flip=True,
fill_mode='reflect')
val_generator = ImageDataGenerator(
preprocessing_function=preprocess_input,
rotation_range=180,
shear_range=15,
zoom_range=0.2,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
vertical_flip=True,
fill_mode='reflect')
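# fit() computes data-dependent statistics for the generators; strictly required only when featurewise normalization options are enabled.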
train_generator.fit(X_Train)
val_generator.fit(X_Val)
# Create callbacks
early_stopping = EarlyStopping(monitor='val_loss', patience=10,
verbose=1, mode='min')
#reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1, mode='min')
reduce_lr = LearningRateScheduler(scheduler)
filepath="checkpoints/checkpoint-{epoch:02d}-{val_accuracy:.2f}.hdf5"
checkpointer = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=False, mode='min')
callbacks = [reduce_lr, early_stopping, checkpointer]
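# Transfer learning: the ImageNet-pretrained VGG16 convolutional base is used as a frozen feature extractor, with a small dense classification head trained on top.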
vgg = VGG16(weights='imagenet',
include_top=False,
input_shape=(image_size, image_size, 3))
model = Sequential()
model.add(vgg)
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Freeze the convolutional base
vgg.trainable = False
opt = keras.optimizers.Adam(learning_rate=0.001)
# Compile the model
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=["accuracy"])
# Train
history = model.fit(
train_generator.flow(X_Train, Y_Train, batch_size=batch_size),
    steps_per_epoch=batches_per_epoch,
epochs=14,
class_weight=class_weights,
shuffle=True,
validation_data=val_generator.flow(X_Val, Y_Val, batch_size=batch_size),
callbacks=callbacks,
verbose=2
)
model.save("models/vgg.h5")
|
normal
|
{
"blob_id": "42ae3804c2d8f6a0d440e2bb6231186a868630b1",
"index": 2772,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Train Benign: ' + str(np.count_nonzero(Y_Train == 0)))\nprint('Train Malignant: ' + str(np.count_nonzero(Y_Train == 1)))\nprint('Test Benign: ' + str(np.count_nonzero(Y_Test == 0)))\nprint('Test Malignant: ' + str(np.count_nonzero(Y_Test == 1)))\nprint('X_Train shape: ' + str(X_Train.shape))\nprint('Y_Train shape: ' + str(Y_Train.shape))\nprint('X_Test shape: ' + str(X_Test.shape))\nprint('Y_Test shape: ' + str(Y_Test.shape))\nprint('X_Val shape: ' + str(X_Val.shape))\nprint('Y_Val shape: ' + str(Y_Val.shape))\n<mask token>\nprint('batches_per_epoch= ' + str(batches_per_epoch))\n<mask token>\nprint('validation batches_per_epoch= ' + str(val_batches_per_epoch))\nprint('Steps per epoch: ', batches_per_epoch)\n<mask token>\ntrain_generator.fit(X_Train)\nval_generator.fit(X_Val)\n<mask token>\nmodel.add(vgg)\nmodel.add(Flatten())\nmodel.add(Dropout(0.5))\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\n<mask token>\nmodel.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])\n<mask token>\nmodel.save('models/vgg.h5')\n",
"step-3": "<mask token>\nimage_size = 256\nmethod = 0\nbatch_size = 8\nMETRICS = [keras.metrics.TruePositives(name='tp'), keras.metrics.\n FalsePositives(name='fp'), keras.metrics.TrueNegatives(name='tn'),\n keras.metrics.FalseNegatives(name='fn'), keras.metrics.BinaryAccuracy(\n name='accuracy'), keras.metrics.Precision(name='precision'), keras.\n metrics.Recall(name='recall'), keras.metrics.AUC(name='auc')]\nX_Train = np.load('data/X_train_256_GRY.npy')\nX_Val = np.load('data/X_val_256_GRY.npy')\nX_Test = np.load('data/X_test_256_GRY.npy')\nY_Train = np.load('data/Y_train.npy')\nY_Val = np.load('data/Y_val.npy')\nY_Test = np.load('data/Y_test.npy')\nprint('Train Benign: ' + str(np.count_nonzero(Y_Train == 0)))\nprint('Train Malignant: ' + str(np.count_nonzero(Y_Train == 1)))\nprint('Test Benign: ' + str(np.count_nonzero(Y_Test == 0)))\nprint('Test Malignant: ' + str(np.count_nonzero(Y_Test == 1)))\nprint('X_Train shape: ' + str(X_Train.shape))\nprint('Y_Train shape: ' + str(Y_Train.shape))\nprint('X_Test shape: ' + str(X_Test.shape))\nprint('Y_Test shape: ' + str(Y_Test.shape))\nprint('X_Val shape: ' + str(X_Val.shape))\nprint('Y_Val shape: ' + str(Y_Val.shape))\nbatches_per_epoch = int(X_Train.shape[0] / batch_size)\nprint('batches_per_epoch= ' + str(batches_per_epoch))\nval_batches_per_epoch = int(X_Val.shape[0] / batch_size)\nprint('validation batches_per_epoch= ' + str(val_batches_per_epoch))\nprint('Steps per epoch: ', batches_per_epoch)\nepoch_count = 25\nclass_weights = {(0): 0.5, (1): 1.0}\ntrain_generator = ImageDataGenerator(preprocessing_function=\n preprocess_input, rotation_range=180, shear_range=15, zoom_range=0.2,\n width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True,\n vertical_flip=True, fill_mode='reflect')\nval_generator = ImageDataGenerator(preprocessing_function=preprocess_input,\n rotation_range=180, shear_range=15, zoom_range=0.2, width_shift_range=\n 0.2, height_shift_range=0.2, horizontal_flip=True, vertical_flip=True,\n fill_mode='reflect')\ntrain_generator.fit(X_Train)\nval_generator.fit(X_Val)\nearly_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=1,\n mode='min')\nreduce_lr = LearningRateScheduler(scheduler)\nfilepath = 'checkpoints/checkpoint-{epoch:02d}-{val_accuracy:.2f}.hdf5'\ncheckpointer = ModelCheckpoint(filepath, monitor='val_loss', verbose=1,\n save_best_only=False, mode='min')\ncallbacks = [reduce_lr, early_stopping, checkpointer]\nvgg = VGG16(weights='imagenet', include_top=False, input_shape=(image_size,\n image_size, 3))\nmodel = Sequential()\nmodel.add(vgg)\nmodel.add(Flatten())\nmodel.add(Dropout(0.5))\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\nvgg.trainable = False\nopt = keras.optimizers.Adam(learning_rate=0.001)\nmodel.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])\nhistory = model.fit(train_generator.flow(X_Train, Y_Train, batch_size=\n batch_size), steps_per_epoch=len(X_Train) / batch_size, epochs=14,\n class_weight=class_weights, shuffle=True, validation_data=val_generator\n .flow(X_Val, Y_Val, batch_size=batch_size), callbacks=callbacks, verbose=2)\nmodel.save('models/vgg.h5')\n",
"step-4": "import numpy as np\nimport cv2\nimport skimage.color\nimport skimage.filters\nimport skimage.io\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nimport pickle\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom sklearn.utils import check_random_state\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization, Conv2DTranspose, Activation, Concatenate\nfrom keras.losses import sparse_categorical_crossentropy\nfrom keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam\nfrom keras.models import load_model, Model\nfrom keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, LearningRateScheduler\nfrom preprocess_data import get_data\nfrom keras.applications.vgg16 import VGG16, preprocess_input\nfrom keras.regularizers import l2\nfrom keras.utils import to_categorical\nimport keras.metrics\nfrom sklearn.utils import class_weight\nfrom utils import scheduler\nimage_size = 256\nmethod = 0\nbatch_size = 8\nMETRICS = [keras.metrics.TruePositives(name='tp'), keras.metrics.\n FalsePositives(name='fp'), keras.metrics.TrueNegatives(name='tn'),\n keras.metrics.FalseNegatives(name='fn'), keras.metrics.BinaryAccuracy(\n name='accuracy'), keras.metrics.Precision(name='precision'), keras.\n metrics.Recall(name='recall'), keras.metrics.AUC(name='auc')]\nX_Train = np.load('data/X_train_256_GRY.npy')\nX_Val = np.load('data/X_val_256_GRY.npy')\nX_Test = np.load('data/X_test_256_GRY.npy')\nY_Train = np.load('data/Y_train.npy')\nY_Val = np.load('data/Y_val.npy')\nY_Test = np.load('data/Y_test.npy')\nprint('Train Benign: ' + str(np.count_nonzero(Y_Train == 0)))\nprint('Train Malignant: ' + str(np.count_nonzero(Y_Train == 1)))\nprint('Test Benign: ' + str(np.count_nonzero(Y_Test == 0)))\nprint('Test Malignant: ' + str(np.count_nonzero(Y_Test == 1)))\nprint('X_Train shape: ' + str(X_Train.shape))\nprint('Y_Train shape: ' + str(Y_Train.shape))\nprint('X_Test shape: ' + str(X_Test.shape))\nprint('Y_Test shape: ' + str(Y_Test.shape))\nprint('X_Val shape: ' + str(X_Val.shape))\nprint('Y_Val shape: ' + str(Y_Val.shape))\nbatches_per_epoch = int(X_Train.shape[0] / batch_size)\nprint('batches_per_epoch= ' + str(batches_per_epoch))\nval_batches_per_epoch = int(X_Val.shape[0] / batch_size)\nprint('validation batches_per_epoch= ' + str(val_batches_per_epoch))\nprint('Steps per epoch: ', batches_per_epoch)\nepoch_count = 25\nclass_weights = {(0): 0.5, (1): 1.0}\ntrain_generator = ImageDataGenerator(preprocessing_function=\n preprocess_input, rotation_range=180, shear_range=15, zoom_range=0.2,\n width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True,\n vertical_flip=True, fill_mode='reflect')\nval_generator = ImageDataGenerator(preprocessing_function=preprocess_input,\n rotation_range=180, shear_range=15, zoom_range=0.2, width_shift_range=\n 0.2, height_shift_range=0.2, horizontal_flip=True, vertical_flip=True,\n fill_mode='reflect')\ntrain_generator.fit(X_Train)\nval_generator.fit(X_Val)\nearly_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=1,\n mode='min')\nreduce_lr = LearningRateScheduler(scheduler)\nfilepath = 'checkpoints/checkpoint-{epoch:02d}-{val_accuracy:.2f}.hdf5'\ncheckpointer = ModelCheckpoint(filepath, monitor='val_loss', verbose=1,\n save_best_only=False, mode='min')\ncallbacks = [reduce_lr, early_stopping, checkpointer]\nvgg = VGG16(weights='imagenet', 
include_top=False, input_shape=(image_size,\n image_size, 3))\nmodel = Sequential()\nmodel.add(vgg)\nmodel.add(Flatten())\nmodel.add(Dropout(0.5))\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\nvgg.trainable = False\nopt = keras.optimizers.Adam(learning_rate=0.001)\nmodel.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])\nhistory = model.fit(train_generator.flow(X_Train, Y_Train, batch_size=\n batch_size), steps_per_epoch=len(X_Train) / batch_size, epochs=14,\n class_weight=class_weights, shuffle=True, validation_data=val_generator\n .flow(X_Val, Y_Val, batch_size=batch_size), callbacks=callbacks, verbose=2)\nmodel.save('models/vgg.h5')\n",
"step-5": "import numpy as np\nimport cv2\nimport skimage.color\nimport skimage.filters\nimport skimage.io\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nimport pickle\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom sklearn.utils import check_random_state\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization, Conv2DTranspose, Activation,\\\n Concatenate\nfrom keras.losses import sparse_categorical_crossentropy\nfrom keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam\nfrom keras.models import load_model, Model\nfrom keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, LearningRateScheduler\nfrom preprocess_data import get_data\nfrom keras.applications.vgg16 import VGG16, preprocess_input\nfrom keras.regularizers import l2\nfrom keras.utils import to_categorical\nimport keras.metrics\nfrom sklearn.utils import class_weight\nfrom utils import scheduler\n\nimage_size = 256\nmethod = 0\nbatch_size = 8\n\nMETRICS = [\n keras.metrics.TruePositives(name='tp'),\n keras.metrics.FalsePositives(name='fp'),\n keras.metrics.TrueNegatives(name='tn'),\n keras.metrics.FalseNegatives(name='fn'),\n keras.metrics.BinaryAccuracy(name='accuracy'),\n keras.metrics.Precision(name='precision'),\n keras.metrics.Recall(name='recall'),\n keras.metrics.AUC(name='auc'),\n]\n\n\n#get_data(save_data=True, method=method)\n\nX_Train = np.load('data/X_train_256_GRY.npy')\nX_Val = np.load('data/X_val_256_GRY.npy')\nX_Test = np.load('data/X_test_256_GRY.npy')\nY_Train = np.load('data/Y_train.npy')\nY_Val = np.load('data/Y_val.npy')\nY_Test = np.load('data/Y_test.npy')\nprint(\"Train Benign: \" + str(np.count_nonzero(Y_Train == 0)))\nprint(\"Train Malignant: \" + str(np.count_nonzero(Y_Train == 1)))\n\nprint(\"Test Benign: \" + str(np.count_nonzero(Y_Test == 0)))\nprint(\"Test Malignant: \" + str(np.count_nonzero(Y_Test == 1)))\n\n\n\nprint(\"X_Train shape: \" + str(X_Train.shape))\nprint(\"Y_Train shape: \" + str(Y_Train.shape))\nprint(\"X_Test shape: \" + str(X_Test.shape))\nprint(\"Y_Test shape: \" + str(Y_Test.shape))\nprint(\"X_Val shape: \" + str(X_Val.shape))\nprint(\"Y_Val shape: \" + str(Y_Val.shape))\n\nbatches_per_epoch = int(X_Train.shape[0] / batch_size)\nprint(\"batches_per_epoch= \" + str(batches_per_epoch))\nval_batches_per_epoch = int(X_Val.shape[0] / batch_size)\n\n\n\n\nprint(\"validation batches_per_epoch= \" + str(val_batches_per_epoch))\nprint(\"Steps per epoch: \", batches_per_epoch)\n\nepoch_count = 25\n\nclass_weights = {0: 0.5, 1: 1.0}\n\n\n#data Augmentation\ntrain_generator = ImageDataGenerator(\n preprocessing_function=preprocess_input,\n rotation_range=180,\n shear_range=15,\n zoom_range=0.2,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True,\n vertical_flip=True,\n fill_mode='reflect')\nval_generator = ImageDataGenerator(\n preprocessing_function=preprocess_input,\n rotation_range=180,\n shear_range=15,\n zoom_range=0.2,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True,\n vertical_flip=True,\n fill_mode='reflect')\ntrain_generator.fit(X_Train)\nval_generator.fit(X_Val)\n# Create callbacks\nearly_stopping = EarlyStopping(monitor='val_loss', patience=10,\n verbose=1, mode='min')\n\n\n\n#reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1, mode='min')\nreduce_lr = 
LearningRateScheduler(scheduler)\n\nfilepath=\"checkpoints/checkpoint-{epoch:02d}-{val_accuracy:.2f}.hdf5\"\ncheckpointer = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=False, mode='min')\n\ncallbacks = [reduce_lr, early_stopping, checkpointer]\n\nvgg = VGG16(weights='imagenet',\n include_top=False,\n input_shape=(image_size, image_size, 3))\n\nmodel = Sequential()\nmodel.add(vgg)\nmodel.add(Flatten())\nmodel.add(Dropout(0.5))\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\n\n# Freeze the convolutional base\nvgg.trainable = False\n\n\nopt = keras.optimizers.Adam(learning_rate=0.001)\n\n# Compile the model\nmodel.compile(optimizer=opt, loss='binary_crossentropy', metrics=[\"accuracy\"])\n\n# Train\nhistory = model.fit(\n train_generator.flow(X_Train, Y_Train, batch_size=batch_size),\n steps_per_epoch=len(X_Train) / batch_size,\n epochs=14,\n class_weight=class_weights,\n shuffle=True,\n validation_data=val_generator.flow(X_Val, Y_Val, batch_size=batch_size),\n callbacks=callbacks,\n verbose=2\n)\n\n\nmodel.save(\"models/vgg.h5\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask import Flask, render_template, request
app = Flask(__name__)
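# Greedy conversion: walk the symbol values from largest to smallest (relies on dict insertion order, Python 3.7+); each symbol is emitted decimal_num // value times and the remainder carries over.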
def convert(decimal_num):
roman = {1000:'M', 900:'CM', 500:'D', 400:'CD', 100:'C', 90:'XC', 50:'L', 40:'XL', 10:'X', 9:'IX', 5:'V', 4:'IV', 1:'I'}
num_to_roman = ''
for i in roman.keys():
num_to_roman += roman[i]*(decimal_num//i)
decimal_num %= i
return num_to_roman
# Either use convert() above or this alternative implementation
#def convert_to_roman(num):
# roman = ['M','CM','D','CD','C','XC','L','XL','X','IX','V','IV','I']
# number = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]
# romanvalue = ''
# for i,d in enumerate(number):
# while (num >= d):
# num -= d
# romanvalue += roman[i]
# return romanvalue
@app.route('/', methods=['POST','GET'])
def main_post():
if request.method == 'POST':
        alpha = request.form['number'] # form values come in as a dict, so we read the field by its key
if not alpha.isdecimal():
            return render_template('index.html', not_valid=True, developer_name='Pablo')
        number = int(alpha)
        if not 0 < number < 4000:
            return render_template('index.html', not_valid=True, developer_name='Pablo')
        return render_template('result.html', developer_name='Pablo', number_decimal=number, number_roman=convert(number))
else:
        return render_template('index.html', not_valid=False, developer_name='Pablo')
if __name__ == '__main__':
    # app.run(debug=True)
    app.run(host='0.0.0.0', port=80)
|
normal
|
{
"blob_id": "7025cc896035c59e0bbb7943493b6ca24fd9e6ca",
"index": 9429,
"step-1": "<mask token>\n\n\ndef convert(decimal_num):\n roman = {(1000): 'M', (900): 'CM', (500): 'D', (400): 'CD', (100): 'C',\n (90): 'XC', (50): 'L', (40): 'XL', (10): 'X', (9): 'IX', (5): 'V',\n (4): 'IV', (1): 'I'}\n num_to_roman = ''\n for i in roman.keys():\n num_to_roman += roman[i] * (decimal_num // i)\n decimal_num %= i\n return num_to_roman\n\n\[email protected]('/', methods=['POST', 'GET'])\ndef main_post():\n if request.method == 'POST':\n alpha = request.form['number']\n if not alpha.isdecimal():\n return render_template('index.html', not_valid=True,\n developer_name='Pablo')\n number = int(alpha)\n if not 0 < number < 4000:\n return render_template('index.html', not_valid=True,\n developer_name='Pablo')\n return render_template('result.html', developer_name='Pablo',\n number_decimal=number, number_roman=convert(number))\n else:\n return render_template('index.html', not_valid=False,\n develeoper_name='Pablo')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef convert(decimal_num):\n roman = {(1000): 'M', (900): 'CM', (500): 'D', (400): 'CD', (100): 'C',\n (90): 'XC', (50): 'L', (40): 'XL', (10): 'X', (9): 'IX', (5): 'V',\n (4): 'IV', (1): 'I'}\n num_to_roman = ''\n for i in roman.keys():\n num_to_roman += roman[i] * (decimal_num // i)\n decimal_num %= i\n return num_to_roman\n\n\[email protected]('/', methods=['POST', 'GET'])\ndef main_post():\n if request.method == 'POST':\n alpha = request.form['number']\n if not alpha.isdecimal():\n return render_template('index.html', not_valid=True,\n developer_name='Pablo')\n number = int(alpha)\n if not 0 < number < 4000:\n return render_template('index.html', not_valid=True,\n developer_name='Pablo')\n return render_template('result.html', developer_name='Pablo',\n number_decimal=number, number_roman=convert(number))\n else:\n return render_template('index.html', not_valid=False,\n develeoper_name='Pablo')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=80)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\ndef convert(decimal_num):\n roman = {(1000): 'M', (900): 'CM', (500): 'D', (400): 'CD', (100): 'C',\n (90): 'XC', (50): 'L', (40): 'XL', (10): 'X', (9): 'IX', (5): 'V',\n (4): 'IV', (1): 'I'}\n num_to_roman = ''\n for i in roman.keys():\n num_to_roman += roman[i] * (decimal_num // i)\n decimal_num %= i\n return num_to_roman\n\n\[email protected]('/', methods=['POST', 'GET'])\ndef main_post():\n if request.method == 'POST':\n alpha = request.form['number']\n if not alpha.isdecimal():\n return render_template('index.html', not_valid=True,\n developer_name='Pablo')\n number = int(alpha)\n if not 0 < number < 4000:\n return render_template('index.html', not_valid=True,\n developer_name='Pablo')\n return render_template('result.html', developer_name='Pablo',\n number_decimal=number, number_roman=convert(number))\n else:\n return render_template('index.html', not_valid=False,\n develeoper_name='Pablo')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=80)\n",
"step-4": "from flask import Flask, render_template, request\napp = Flask(__name__)\n\n\ndef convert(decimal_num):\n roman = {(1000): 'M', (900): 'CM', (500): 'D', (400): 'CD', (100): 'C',\n (90): 'XC', (50): 'L', (40): 'XL', (10): 'X', (9): 'IX', (5): 'V',\n (4): 'IV', (1): 'I'}\n num_to_roman = ''\n for i in roman.keys():\n num_to_roman += roman[i] * (decimal_num // i)\n decimal_num %= i\n return num_to_roman\n\n\[email protected]('/', methods=['POST', 'GET'])\ndef main_post():\n if request.method == 'POST':\n alpha = request.form['number']\n if not alpha.isdecimal():\n return render_template('index.html', not_valid=True,\n developer_name='Pablo')\n number = int(alpha)\n if not 0 < number < 4000:\n return render_template('index.html', not_valid=True,\n developer_name='Pablo')\n return render_template('result.html', developer_name='Pablo',\n number_decimal=number, number_roman=convert(number))\n else:\n return render_template('index.html', not_valid=False,\n develeoper_name='Pablo')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=80)\n",
"step-5": "from flask import Flask, render_template, request\n\napp = Flask(__name__)\n\ndef convert(decimal_num):\n roman = {1000:'M', 900:'CM', 500:'D', 400:'CD', 100:'C', 90:'XC', 50:'L', 40:'XL', 10:'X', 9:'IX', 5:'V', 4:'IV', 1:'I'}\n num_to_roman = ''\n for i in roman.keys():\n num_to_roman += roman[i]*(decimal_num//i)\n decimal_num %= i \n return num_to_roman\n\n# Ister ustekini kullan ister bunu\n#def convert_to_roman(num):\n# roman = ['M','CM','D','CD','C','XC','L','XL','X','IX','V','IV','I']\n# number = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]\n# romanvalue = ''\n# for i,d in enumerate(number):\n# while (num >= d): \n# num -= d\n# romanvalue += roman[i]\n# return romanvalue\n\[email protected]('/', methods=['POST','GET'])\ndef main_post():\n if request.method == 'POST':\n alpha = request.form['number'] # degerler dictionary olarak geliyor dedi o yuzden key i aliyoz [] ile\n if not alpha.isdecimal():\n return render_template('index.html', not_valid=True,developer_name='Pablo')\n number=int(alpha)\n if not 0<number<4000:\n return render_template('index.html', not_valid=True,developer_name='Pablo')\n return render_template('result.html', developer_name='Pablo', number_decimal=number,number_roman=convert(number))\n \n\n\n\n else:\n return render_template('index.html',not_valid = False, develeoper_name='Pablo') \n\n\n\n\n\n\n\n\nif __name__=='__main__':\n #app.run(debug=True)\n app.run(host='0.0.0.0',port=80)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Score(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Score(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __unicode__(self):
return smart_unicode(self.ps)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Score(models.Model):
pst = models.IntegerField(null=False)
timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)
positivePersonality = models.IntegerField(null=False, blank=False)
negativePersonality = models.IntegerField(null=False, blank=False)
positiveReviewMentions = models.IntegerField(null=False, blank=False)
negativeReviewMentions = models.IntegerField(null=False, blank=False)
userScore = models.IntegerField(null=False, blank=False)
ps = models.IntegerField(null=False)
def __unicode__(self):
return smart_unicode(self.ps)
<|reserved_special_token_1|>
from django.db import models
from django.utils.encoding import smart_unicode
class Score(models.Model):
pst = models.IntegerField(null=False)
timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)
positivePersonality = models.IntegerField(null=False, blank=False)
negativePersonality = models.IntegerField(null=False, blank=False)
positiveReviewMentions = models.IntegerField(null=False, blank=False)
negativeReviewMentions = models.IntegerField(null=False, blank=False)
userScore = models.IntegerField(null=False, blank=False)
ps = models.IntegerField(null=False)
def __unicode__(self):
return smart_unicode(self.ps)
<|reserved_special_token_1|>
from django.db import models
#from publicservants import models
from django.utils.encoding import smart_unicode
# Create your models here.
class Score(models.Model):
#score ID - publicservant ID plus score
#sID = models.ManyToOneRel(field=PublicServant.psID)
#PS Score at time t
pst = models.IntegerField(null=False)
timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)
#Factors that determine Public Servant Score, include Thumbs up or down on certain criterias.
#Aggregrate values for period of time
positivePersonality = models.IntegerField(null=False, blank=False)
negativePersonality = models.IntegerField(null=False, blank=False)
positiveReviewMentions = models.IntegerField(null=False, blank=False)
negativeReviewMentions = models.IntegerField(null=False, blank=False)
userScore= models.IntegerField(null=False, blank=False)
#Actual PSScore at 12am everyday
ps = models.IntegerField(null=False)
def __unicode__(self):
return smart_unicode(self.ps) # + smart_unicode(self.PublicServant.psID)
|
flexible
|
{
"blob_id": "8c166dd4cb091dcd2d80b5ae3085b5dee77564e0",
"index": 1227,
"step-1": "<mask token>\n\n\nclass Score(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Score(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return smart_unicode(self.ps)\n",
"step-3": "<mask token>\n\n\nclass Score(models.Model):\n pst = models.IntegerField(null=False)\n timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)\n positivePersonality = models.IntegerField(null=False, blank=False)\n negativePersonality = models.IntegerField(null=False, blank=False)\n positiveReviewMentions = models.IntegerField(null=False, blank=False)\n negativeReviewMentions = models.IntegerField(null=False, blank=False)\n userScore = models.IntegerField(null=False, blank=False)\n ps = models.IntegerField(null=False)\n\n def __unicode__(self):\n return smart_unicode(self.ps)\n",
"step-4": "from django.db import models\nfrom django.utils.encoding import smart_unicode\n\n\nclass Score(models.Model):\n pst = models.IntegerField(null=False)\n timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)\n positivePersonality = models.IntegerField(null=False, blank=False)\n negativePersonality = models.IntegerField(null=False, blank=False)\n positiveReviewMentions = models.IntegerField(null=False, blank=False)\n negativeReviewMentions = models.IntegerField(null=False, blank=False)\n userScore = models.IntegerField(null=False, blank=False)\n ps = models.IntegerField(null=False)\n\n def __unicode__(self):\n return smart_unicode(self.ps)\n",
"step-5": "from django.db import models\n#from publicservants import models\nfrom django.utils.encoding import smart_unicode\n\n# Create your models here.\n\n\nclass Score(models.Model):\n #score ID - publicservant ID plus score\n #sID = models.ManyToOneRel(field=PublicServant.psID)\n \n #PS Score at time t\n pst = models.IntegerField(null=False)\n timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)\n \n #Factors that determine Public Servant Score, include Thumbs up or down on certain criterias.\n #Aggregrate values for period of time\n positivePersonality = models.IntegerField(null=False, blank=False)\n negativePersonality = models.IntegerField(null=False, blank=False)\n \n positiveReviewMentions = models.IntegerField(null=False, blank=False)\n negativeReviewMentions = models.IntegerField(null=False, blank=False)\n \n userScore= models.IntegerField(null=False, blank=False)\n \n #Actual PSScore at 12am everyday\n ps = models.IntegerField(null=False)\n \n def __unicode__(self):\n return smart_unicode(self.ps) # + smart_unicode(self.PublicServant.psID)\n \n \n ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
import json
except ImportError:
import simplejson as json
<|reserved_special_token_0|>
if platform.system() == 'Linux':
def SubprocessPopen(k):
devnull = open(os.devnull, 'w')
proc = subprocess.Popen([k], stdout=subprocess.PIPE, shell=True,
stderr=devnull)
x = proc.communicate()[0]
return x.strip()
def display_hostname():
x = platform.node()
return x.replace(my_domain, '').replace('.', '').lower()
def display_site():
sites = 'SNE', 'RJO', 'BFC'
x = platform.node()
site = x.upper()[:3]
if site in sites:
return site
else:
return ''
def display_release():
k = "lsb_release -d | awk -F':' '{{print $2}}'"
return SubprocessPopen(k.strip())
def display_hw_serialnumber():
k = "dmidecode -s system-serial-number | egrep -v '^#'"
return SubprocessPopen(k.strip())
def display_hw_vendor():
k = "dmidecode -s system-manufacturer | egrep -v '^#'"
return SubprocessPopen(k.strip())
def display_hw_model():
k = "dmidecode -s system-product-name | egrep -v '^#'"
return SubprocessPopen(k.strip())
def display_fc_wwpn():
k = 'cat /sys/class/fc_host/host*/port_name|xargs'
return SubprocessPopen(k.strip().replace('0x', ''))
def display_ipaddr():
k = (
"ip addr show | egrep inet | awk '{{print $2}}' | awk -F'/' '{{print $1}}' | egrep -v '^127|::'|xargs"
)
return SubprocessPopen(k.strip())
def display_frame():
k = (
"powermt display ports | awk '{{print $1}}' | egrep '^[A-Z]+{2}[0-9]|[0-9]' | sort -u|xargs"
)
return SubprocessPopen(k.strip())
def display_memory():
k = (
"egrep MemTotal /proc/meminfo | awk -F':' '{{print $2}}' | awk '{{print int($1/1024)}}'"
)
return SubprocessPopen(k) + ' MB'
def display_cpu():
k = """model=$(lscpu | egrep ^'Model name' | awk -F\\: '{{print$2}}')
socket=$(lscpu | egrep ^'Socket' | awk -F\\: '{{print$2}}')
cpu=$(lscpu | egrep ^'CPU\\(' | awk -F\\: '{{print$2}}')
core=$(lscpu | egrep ^'Core' | awk -F\\: '{{print$2}}')
echo $model / $socket Socket\\(s\\) / $cpu CPU\\(s\\) / $core Core\\(s\\) per Socket"""
return SubprocessPopen(k)
def display_cluster():
k = "/opt/VRTSvcs/bin/haclus -state | awk '{{print $1}}' | tail -n1"
return SubprocessPopen(k)
def display_clusternodes():
k = '/opt/VRTSvcs/bin/hasys -list'
return SubprocessPopen(k)
def display_db():
k = (
"ps -ef | grep pmon | awk -F\\_ '{{print $3}}' | egrep -v '^$|\\+ASM'"
)
return SubprocessPopen(k)
print(
"""server_name: {0:s}
server_release: {1:s}
server_site: {2:s}
server_vendor: {3:s}
server_model: {4:s}
server_serial: {5:s}
server_cpu: {6:s}
server_memory: {7:s}
server_ip: {8:s}
server_cluster: {9:s}
server_clusternodes: {10:s}
server_frame: {11:s}
server_wwpn: {12:s}
server_db: {13:s}"""
.format(display_hostname(), display_release(), display_site(),
display_hw_vendor(), display_hw_model(), display_hw_serialnumber(),
display_cpu(), display_memory(), display_ipaddr(), display_cluster(
), display_clusternodes(), display_frame(), display_fc_wwpn(),
display_db()))
hadouken = {'server_name': display_hostname(), 'server_release':
display_release(), 'server_site': display_site(), 'server_vendor':
display_hw_vendor(), 'server_model': display_hw_model(),
'server_serial': display_hw_serialnumber(), 'server_cpu':
display_cpu(), 'server_memory': display_memory(), 'server_ip':
display_ipaddr(), 'server_cluster': display_cluster(),
'server_clusternodes': display_clusternodes(), 'server_frame':
display_frame(), 'server_wwpn': display_fc_wwpn(), 'server_db':
display_db()}
hadouken_file = '/var/tmp/%s.json' % display_hostname()
fp = open(hadouken_file, 'w')
json.dump(hadouken, fp)
else:
print('OS not supported.')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
import json
except ImportError:
import simplejson as json
my_domain = 'localdomain'
if platform.system() == 'Linux':
def SubprocessPopen(k):
devnull = open(os.devnull, 'w')
proc = subprocess.Popen([k], stdout=subprocess.PIPE, shell=True,
stderr=devnull)
x = proc.communicate()[0]
return x.strip()
def display_hostname():
x = platform.node()
return x.replace(my_domain, '').replace('.', '').lower()
def display_site():
sites = 'SNE', 'RJO', 'BFC'
x = platform.node()
site = x.upper()[:3]
if site in sites:
return site
else:
return ''
def display_release():
k = "lsb_release -d | awk -F':' '{{print $2}}'"
return SubprocessPopen(k.strip())
def display_hw_serialnumber():
k = "dmidecode -s system-serial-number | egrep -v '^#'"
return SubprocessPopen(k.strip())
def display_hw_vendor():
k = "dmidecode -s system-manufacturer | egrep -v '^#'"
return SubprocessPopen(k.strip())
def display_hw_model():
k = "dmidecode -s system-product-name | egrep -v '^#'"
return SubprocessPopen(k.strip())
def display_fc_wwpn():
k = 'cat /sys/class/fc_host/host*/port_name|xargs'
return SubprocessPopen(k.strip().replace('0x', ''))
def display_ipaddr():
k = (
"ip addr show | egrep inet | awk '{{print $2}}' | awk -F'/' '{{print $1}}' | egrep -v '^127|::'|xargs"
)
return SubprocessPopen(k.strip())
def display_frame():
k = (
"powermt display ports | awk '{{print $1}}' | egrep '^[A-Z]+{2}[0-9]|[0-9]' | sort -u|xargs"
)
return SubprocessPopen(k.strip())
def display_memory():
k = (
"egrep MemTotal /proc/meminfo | awk -F':' '{{print $2}}' | awk '{{print int($1/1024)}}'"
)
return SubprocessPopen(k) + ' MB'
def display_cpu():
k = """model=$(lscpu | egrep ^'Model name' | awk -F\\: '{{print$2}}')
socket=$(lscpu | egrep ^'Socket' | awk -F\\: '{{print$2}}')
cpu=$(lscpu | egrep ^'CPU\\(' | awk -F\\: '{{print$2}}')
core=$(lscpu | egrep ^'Core' | awk -F\\: '{{print$2}}')
echo $model / $socket Socket\\(s\\) / $cpu CPU\\(s\\) / $core Core\\(s\\) per Socket"""
return SubprocessPopen(k)
def display_cluster():
k = "/opt/VRTSvcs/bin/haclus -state | awk '{{print $1}}' | tail -n1"
return SubprocessPopen(k)
def display_clusternodes():
k = '/opt/VRTSvcs/bin/hasys -list'
return SubprocessPopen(k)
def display_db():
k = (
"ps -ef | grep pmon | awk -F\\_ '{{print $3}}' | egrep -v '^$|\\+ASM'"
)
return SubprocessPopen(k)
print(
"""server_name: {0:s}
server_release: {1:s}
server_site: {2:s}
server_vendor: {3:s}
server_model: {4:s}
server_serial: {5:s}
server_cpu: {6:s}
server_memory: {7:s}
server_ip: {8:s}
server_cluster: {9:s}
server_clusternodes: {10:s}
server_frame: {11:s}
server_wwpn: {12:s}
server_db: {13:s}"""
.format(display_hostname(), display_release(), display_site(),
display_hw_vendor(), display_hw_model(), display_hw_serialnumber(),
display_cpu(), display_memory(), display_ipaddr(), display_cluster(
), display_clusternodes(), display_frame(), display_fc_wwpn(),
display_db()))
hadouken = {'server_name': display_hostname(), 'server_release':
display_release(), 'server_site': display_site(), 'server_vendor':
display_hw_vendor(), 'server_model': display_hw_model(),
'server_serial': display_hw_serialnumber(), 'server_cpu':
display_cpu(), 'server_memory': display_memory(), 'server_ip':
display_ipaddr(), 'server_cluster': display_cluster(),
'server_clusternodes': display_clusternodes(), 'server_frame':
display_frame(), 'server_wwpn': display_fc_wwpn(), 'server_db':
display_db()}
hadouken_file = '/var/tmp/%s.json' % display_hostname()
fp = open(hadouken_file, 'w')
json.dump(hadouken, fp)
else:
print('OS not supported.')
<|reserved_special_token_1|>
import os
import platform
import subprocess
try:
import json
except ImportError:
import simplejson as json
my_domain = 'localdomain'
if platform.system() == 'Linux':
def SubprocessPopen(k):
devnull = open(os.devnull, 'w')
proc = subprocess.Popen([k], stdout=subprocess.PIPE, shell=True,
stderr=devnull)
x = proc.communicate()[0]
return x.strip()
def display_hostname():
x = platform.node()
return x.replace(my_domain, '').replace('.', '').lower()
def display_site():
sites = 'SNE', 'RJO', 'BFC'
x = platform.node()
site = x.upper()[:3]
if site in sites:
return site
else:
return ''
def display_release():
k = "lsb_release -d | awk -F':' '{{print $2}}'"
return SubprocessPopen(k.strip())
def display_hw_serialnumber():
k = "dmidecode -s system-serial-number | egrep -v '^#'"
return SubprocessPopen(k.strip())
def display_hw_vendor():
k = "dmidecode -s system-manufacturer | egrep -v '^#'"
return SubprocessPopen(k.strip())
def display_hw_model():
k = "dmidecode -s system-product-name | egrep -v '^#'"
return SubprocessPopen(k.strip())
def display_fc_wwpn():
k = 'cat /sys/class/fc_host/host*/port_name|xargs'
return SubprocessPopen(k.strip().replace('0x', ''))
def display_ipaddr():
k = (
"ip addr show | egrep inet | awk '{{print $2}}' | awk -F'/' '{{print $1}}' | egrep -v '^127|::'|xargs"
)
return SubprocessPopen(k.strip())
def display_frame():
k = (
"powermt display ports | awk '{{print $1}}' | egrep '^[A-Z]+{2}[0-9]|[0-9]' | sort -u|xargs"
)
return SubprocessPopen(k.strip())
def display_memory():
k = (
"egrep MemTotal /proc/meminfo | awk -F':' '{{print $2}}' | awk '{{print int($1/1024)}}'"
)
return SubprocessPopen(k) + ' MB'
def display_cpu():
k = """model=$(lscpu | egrep ^'Model name' | awk -F\\: '{{print$2}}')
socket=$(lscpu | egrep ^'Socket' | awk -F\\: '{{print$2}}')
cpu=$(lscpu | egrep ^'CPU\\(' | awk -F\\: '{{print$2}}')
core=$(lscpu | egrep ^'Core' | awk -F\\: '{{print$2}}')
echo $model / $socket Socket\\(s\\) / $cpu CPU\\(s\\) / $core Core\\(s\\) per Socket"""
return SubprocessPopen(k)
def display_cluster():
k = "/opt/VRTSvcs/bin/haclus -state | awk '{{print $1}}' | tail -n1"
return SubprocessPopen(k)
def display_clusternodes():
k = '/opt/VRTSvcs/bin/hasys -list'
return SubprocessPopen(k)
def display_db():
k = (
"ps -ef | grep pmon | awk -F\\_ '{{print $3}}' | egrep -v '^$|\\+ASM'"
)
return SubprocessPopen(k)
print(
"""server_name: {0:s}
server_release: {1:s}
server_site: {2:s}
server_vendor: {3:s}
server_model: {4:s}
server_serial: {5:s}
server_cpu: {6:s}
server_memory: {7:s}
server_ip: {8:s}
server_cluster: {9:s}
server_clusternodes: {10:s}
server_frame: {11:s}
server_wwpn: {12:s}
server_db: {13:s}"""
.format(display_hostname(), display_release(), display_site(),
display_hw_vendor(), display_hw_model(), display_hw_serialnumber(),
display_cpu(), display_memory(), display_ipaddr(), display_cluster(
), display_clusternodes(), display_frame(), display_fc_wwpn(),
display_db()))
hadouken = {'server_name': display_hostname(), 'server_release':
display_release(), 'server_site': display_site(), 'server_vendor':
display_hw_vendor(), 'server_model': display_hw_model(),
'server_serial': display_hw_serialnumber(), 'server_cpu':
display_cpu(), 'server_memory': display_memory(), 'server_ip':
display_ipaddr(), 'server_cluster': display_cluster(),
'server_clusternodes': display_clusternodes(), 'server_frame':
display_frame(), 'server_wwpn': display_fc_wwpn(), 'server_db':
display_db()}
hadouken_file = '/var/tmp/%s.json' % display_hostname()
fp = open(hadouken_file, 'w')
json.dump(hadouken, fp)
else:
print('OS not supported.')
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import platform
import subprocess
# try to import json module, if got an error use simplejson instead of json.
try:
import json
except ImportError:
import simplejson as json
# if your server uses fqdn, you can suppress the domain, just change the bellow variable to your domain.
my_domain = 'localdomain'
# checks if operating system is Linux.
if platform.system() == 'Linux':
# subprocess funciton, pass a operation system command as k variable.
def SubprocessPopen(k):
devnull = open(os.devnull, 'w')
proc = subprocess.Popen([k], stdout=subprocess.PIPE, shell=True, stderr=devnull)
x = proc.communicate()[0]
return x.strip()
# display hostname
def display_hostname():
x = platform.node()
return x.replace(my_domain, '').replace('.', '').lower()
# in my case the first 3 letters of the hostname indicates the site location, change if you want.
def display_site():
sites = ('SNE', 'RJO', 'BFC')
x = platform.node()
site = x.upper()[:3]
if site in sites:
return site
else:
return ''
# display operation system release.
def display_release():
k = "lsb_release -d | awk -F':' '{{print $2}}'"
return (SubprocessPopen(k.strip()))
# display the hardware serial number.
def display_hw_serialnumber():
k = "dmidecode -s system-serial-number | egrep -v '^#'"
return (SubprocessPopen(k.strip()))
# display hardware vendor.
def display_hw_vendor():
k = "dmidecode -s system-manufacturer | egrep -v '^#'"
return (SubprocessPopen(k.strip()))
# display hardware model.
def display_hw_model():
k = "dmidecode -s system-product-name | egrep -v '^#'"
return SubprocessPopen(k.strip())
# display fibre channel id wwpn.
def display_fc_wwpn():
k = "cat /sys/class/fc_host/host*/port_name|xargs"
return SubprocessPopen(k.strip().replace('0x', ''))
# display ipv4 address.
def display_ipaddr():
k = "ip addr show | egrep inet | awk '{{print $2}}' | awk -F'/' '{{print $1}}' | egrep -v '^127|::'|xargs"
return SubprocessPopen(k.strip())
# display EMC storage id.
def display_frame():
k = "powermt display ports | awk '{{print $1}}' | egrep '^[A-Z]+{2}[0-9]|[0-9]' | sort -u|xargs"
return SubprocessPopen(k.strip())
# display total memory in MB.
def display_memory():
k = "egrep MemTotal /proc/meminfo | awk -F':' '{{print $2}}' | awk '{{print int($1/1024)}}'"
return SubprocessPopen(k) + " MB"
# display cpu info, physical and cores.
def display_cpu():
k = "model=$(lscpu | egrep ^'Model name' | awk -F\: '{{print$2}}')\n" \
"socket=$(lscpu | egrep ^'Socket' | awk -F\: '{{print$2}}')\n" \
"cpu=$(lscpu | egrep ^'CPU\(' | awk -F\: '{{print$2}}')\n" \
"core=$(lscpu | egrep ^'Core' | awk -F\: '{{print$2}}')\n" \
"echo ""$model / $socket Socket\\(s\\) / $cpu CPU\\(s\\) / $core Core\\(s\\) per Socket"""
return SubprocessPopen(k)
# display information about Veritas InforScale and Cluster Server.
def display_cluster():
k = "/opt/VRTSvcs/bin/haclus -state | awk '{{print $1}}' | tail -n1"
return SubprocessPopen(k)
# display the list of cluster nodes.
def display_clusternodes():
k = "/opt/VRTSvcs/bin/hasys -list"
return SubprocessPopen(k)
# display the name of Oracle instances.
def display_db():
k = "ps -ef | grep pmon | awk -F\_ '{{print $3}}' | egrep -v '^$|\+ASM'"
return SubprocessPopen(k)
# print all information on the screen.
print(
"server_name: {0:s} \n"
"server_release: {1:s} \n"
"server_site: {2:s} \n"
"server_vendor: {3:s} \n"
"server_model: {4:s} \n"
"server_serial: {5:s} \n"
"server_cpu: {6:s} \n"
"server_memory: {7:s} \n"
"server_ip: {8:s} \n"
"server_cluster: {9:s} \n"
"server_clusternodes: {10:s} \n"
"server_frame: {11:s} \n"
"server_wwpn: {12:s} \n"
"server_db: {13:s}".format(display_hostname(), display_release(), display_site(), display_hw_vendor(), display_hw_model(),
display_hw_serialnumber(),
display_cpu(), display_memory(), display_ipaddr(), display_cluster(), display_clusternodes(),
display_frame(),
display_fc_wwpn(), display_db()))
# create a dict to export info to sqlite db.
hadouken = {'server_name': display_hostname(), 'server_release': display_release(), 'server_site': display_site(),
'server_vendor': display_hw_vendor(), 'server_model': display_hw_model(),
'server_serial': display_hw_serialnumber(), 'server_cpu': display_cpu(), 'server_memory': display_memory(),
'server_ip': display_ipaddr(), 'server_cluster': display_cluster(), 'server_clusternodes': display_clusternodes(),
'server_frame': display_frame(), 'server_wwpn': display_fc_wwpn(), 'server_db': display_db()}
# export hadouken info to be loaded into sqlite3 using db.py..
hadouken_file = '/var/tmp/%s.json' % display_hostname()
fp = open(hadouken_file, 'w')
json.dump(hadouken, fp)
else:
# if the operation system is not Linux, sorry.
print("OS not supported.")
|
flexible
|
{
"blob_id": "de819a72ab659b50620fad2296027cb9f4d3e4c0",
"index": 5048,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n<mask token>\nif platform.system() == 'Linux':\n\n def SubprocessPopen(k):\n devnull = open(os.devnull, 'w')\n proc = subprocess.Popen([k], stdout=subprocess.PIPE, shell=True,\n stderr=devnull)\n x = proc.communicate()[0]\n return x.strip()\n\n def display_hostname():\n x = platform.node()\n return x.replace(my_domain, '').replace('.', '').lower()\n\n def display_site():\n sites = 'SNE', 'RJO', 'BFC'\n x = platform.node()\n site = x.upper()[:3]\n if site in sites:\n return site\n else:\n return ''\n\n def display_release():\n k = \"lsb_release -d | awk -F':' '{{print $2}}'\"\n return SubprocessPopen(k.strip())\n\n def display_hw_serialnumber():\n k = \"dmidecode -s system-serial-number | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n def display_hw_vendor():\n k = \"dmidecode -s system-manufacturer | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n def display_hw_model():\n k = \"dmidecode -s system-product-name | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n def display_fc_wwpn():\n k = 'cat /sys/class/fc_host/host*/port_name|xargs'\n return SubprocessPopen(k.strip().replace('0x', ''))\n\n def display_ipaddr():\n k = (\n \"ip addr show | egrep inet | awk '{{print $2}}' | awk -F'/' '{{print $1}}' | egrep -v '^127|::'|xargs\"\n )\n return SubprocessPopen(k.strip())\n\n def display_frame():\n k = (\n \"powermt display ports | awk '{{print $1}}' | egrep '^[A-Z]+{2}[0-9]|[0-9]' | sort -u|xargs\"\n )\n return SubprocessPopen(k.strip())\n\n def display_memory():\n k = (\n \"egrep MemTotal /proc/meminfo | awk -F':' '{{print $2}}' | awk '{{print int($1/1024)}}'\"\n )\n return SubprocessPopen(k) + ' MB'\n\n def display_cpu():\n k = \"\"\"model=$(lscpu | egrep ^'Model name' | awk -F\\\\: '{{print$2}}')\nsocket=$(lscpu | egrep ^'Socket' | awk -F\\\\: '{{print$2}}')\ncpu=$(lscpu | egrep ^'CPU\\\\(' | awk -F\\\\: '{{print$2}}')\ncore=$(lscpu | egrep ^'Core' | awk -F\\\\: '{{print$2}}')\necho $model / $socket Socket\\\\(s\\\\) / $cpu CPU\\\\(s\\\\) / $core Core\\\\(s\\\\) per Socket\"\"\"\n return SubprocessPopen(k)\n\n def display_cluster():\n k = \"/opt/VRTSvcs/bin/haclus -state | awk '{{print $1}}' | tail -n1\"\n return SubprocessPopen(k)\n\n def display_clusternodes():\n k = '/opt/VRTSvcs/bin/hasys -list'\n return SubprocessPopen(k)\n\n def display_db():\n k = (\n \"ps -ef | grep pmon | awk -F\\\\_ '{{print $3}}' | egrep -v '^$|\\\\+ASM'\"\n )\n return SubprocessPopen(k)\n print(\n \"\"\"server_name: {0:s} \nserver_release: {1:s} \nserver_site: {2:s} \nserver_vendor: {3:s} \nserver_model: {4:s} \nserver_serial: {5:s} \nserver_cpu: {6:s} \nserver_memory: {7:s} \nserver_ip: {8:s} \nserver_cluster: {9:s} \nserver_clusternodes: {10:s} \nserver_frame: {11:s} \nserver_wwpn: {12:s} \nserver_db: {13:s}\"\"\"\n .format(display_hostname(), display_release(), display_site(),\n display_hw_vendor(), display_hw_model(), display_hw_serialnumber(),\n display_cpu(), display_memory(), display_ipaddr(), display_cluster(\n ), display_clusternodes(), display_frame(), display_fc_wwpn(),\n display_db()))\n hadouken = {'server_name': display_hostname(), 'server_release':\n display_release(), 'server_site': display_site(), 'server_vendor':\n display_hw_vendor(), 'server_model': display_hw_model(),\n 'server_serial': display_hw_serialnumber(), 'server_cpu':\n display_cpu(), 'server_memory': display_memory(), 'server_ip':\n display_ipaddr(), 'server_cluster': display_cluster(),\n 'server_clusternodes': 
display_clusternodes(), 'server_frame':\n display_frame(), 'server_wwpn': display_fc_wwpn(), 'server_db':\n display_db()}\n hadouken_file = '/var/tmp/%s.json' % display_hostname()\n fp = open(hadouken_file, 'w')\n json.dump(hadouken, fp)\nelse:\n print('OS not supported.')\n",
"step-3": "<mask token>\ntry:\n import json\nexcept ImportError:\n import simplejson as json\nmy_domain = 'localdomain'\nif platform.system() == 'Linux':\n\n def SubprocessPopen(k):\n devnull = open(os.devnull, 'w')\n proc = subprocess.Popen([k], stdout=subprocess.PIPE, shell=True,\n stderr=devnull)\n x = proc.communicate()[0]\n return x.strip()\n\n def display_hostname():\n x = platform.node()\n return x.replace(my_domain, '').replace('.', '').lower()\n\n def display_site():\n sites = 'SNE', 'RJO', 'BFC'\n x = platform.node()\n site = x.upper()[:3]\n if site in sites:\n return site\n else:\n return ''\n\n def display_release():\n k = \"lsb_release -d | awk -F':' '{{print $2}}'\"\n return SubprocessPopen(k.strip())\n\n def display_hw_serialnumber():\n k = \"dmidecode -s system-serial-number | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n def display_hw_vendor():\n k = \"dmidecode -s system-manufacturer | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n def display_hw_model():\n k = \"dmidecode -s system-product-name | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n def display_fc_wwpn():\n k = 'cat /sys/class/fc_host/host*/port_name|xargs'\n return SubprocessPopen(k.strip().replace('0x', ''))\n\n def display_ipaddr():\n k = (\n \"ip addr show | egrep inet | awk '{{print $2}}' | awk -F'/' '{{print $1}}' | egrep -v '^127|::'|xargs\"\n )\n return SubprocessPopen(k.strip())\n\n def display_frame():\n k = (\n \"powermt display ports | awk '{{print $1}}' | egrep '^[A-Z]+{2}[0-9]|[0-9]' | sort -u|xargs\"\n )\n return SubprocessPopen(k.strip())\n\n def display_memory():\n k = (\n \"egrep MemTotal /proc/meminfo | awk -F':' '{{print $2}}' | awk '{{print int($1/1024)}}'\"\n )\n return SubprocessPopen(k) + ' MB'\n\n def display_cpu():\n k = \"\"\"model=$(lscpu | egrep ^'Model name' | awk -F\\\\: '{{print$2}}')\nsocket=$(lscpu | egrep ^'Socket' | awk -F\\\\: '{{print$2}}')\ncpu=$(lscpu | egrep ^'CPU\\\\(' | awk -F\\\\: '{{print$2}}')\ncore=$(lscpu | egrep ^'Core' | awk -F\\\\: '{{print$2}}')\necho $model / $socket Socket\\\\(s\\\\) / $cpu CPU\\\\(s\\\\) / $core Core\\\\(s\\\\) per Socket\"\"\"\n return SubprocessPopen(k)\n\n def display_cluster():\n k = \"/opt/VRTSvcs/bin/haclus -state | awk '{{print $1}}' | tail -n1\"\n return SubprocessPopen(k)\n\n def display_clusternodes():\n k = '/opt/VRTSvcs/bin/hasys -list'\n return SubprocessPopen(k)\n\n def display_db():\n k = (\n \"ps -ef | grep pmon | awk -F\\\\_ '{{print $3}}' | egrep -v '^$|\\\\+ASM'\"\n )\n return SubprocessPopen(k)\n print(\n \"\"\"server_name: {0:s} \nserver_release: {1:s} \nserver_site: {2:s} \nserver_vendor: {3:s} \nserver_model: {4:s} \nserver_serial: {5:s} \nserver_cpu: {6:s} \nserver_memory: {7:s} \nserver_ip: {8:s} \nserver_cluster: {9:s} \nserver_clusternodes: {10:s} \nserver_frame: {11:s} \nserver_wwpn: {12:s} \nserver_db: {13:s}\"\"\"\n .format(display_hostname(), display_release(), display_site(),\n display_hw_vendor(), display_hw_model(), display_hw_serialnumber(),\n display_cpu(), display_memory(), display_ipaddr(), display_cluster(\n ), display_clusternodes(), display_frame(), display_fc_wwpn(),\n display_db()))\n hadouken = {'server_name': display_hostname(), 'server_release':\n display_release(), 'server_site': display_site(), 'server_vendor':\n display_hw_vendor(), 'server_model': display_hw_model(),\n 'server_serial': display_hw_serialnumber(), 'server_cpu':\n display_cpu(), 'server_memory': display_memory(), 'server_ip':\n display_ipaddr(), 'server_cluster': display_cluster(),\n 
'server_clusternodes': display_clusternodes(), 'server_frame':\n display_frame(), 'server_wwpn': display_fc_wwpn(), 'server_db':\n display_db()}\n hadouken_file = '/var/tmp/%s.json' % display_hostname()\n fp = open(hadouken_file, 'w')\n json.dump(hadouken, fp)\nelse:\n print('OS not supported.')\n",
"step-4": "import os\nimport platform\nimport subprocess\ntry:\n import json\nexcept ImportError:\n import simplejson as json\nmy_domain = 'localdomain'\nif platform.system() == 'Linux':\n\n def SubprocessPopen(k):\n devnull = open(os.devnull, 'w')\n proc = subprocess.Popen([k], stdout=subprocess.PIPE, shell=True,\n stderr=devnull)\n x = proc.communicate()[0]\n return x.strip()\n\n def display_hostname():\n x = platform.node()\n return x.replace(my_domain, '').replace('.', '').lower()\n\n def display_site():\n sites = 'SNE', 'RJO', 'BFC'\n x = platform.node()\n site = x.upper()[:3]\n if site in sites:\n return site\n else:\n return ''\n\n def display_release():\n k = \"lsb_release -d | awk -F':' '{{print $2}}'\"\n return SubprocessPopen(k.strip())\n\n def display_hw_serialnumber():\n k = \"dmidecode -s system-serial-number | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n def display_hw_vendor():\n k = \"dmidecode -s system-manufacturer | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n def display_hw_model():\n k = \"dmidecode -s system-product-name | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n def display_fc_wwpn():\n k = 'cat /sys/class/fc_host/host*/port_name|xargs'\n return SubprocessPopen(k.strip().replace('0x', ''))\n\n def display_ipaddr():\n k = (\n \"ip addr show | egrep inet | awk '{{print $2}}' | awk -F'/' '{{print $1}}' | egrep -v '^127|::'|xargs\"\n )\n return SubprocessPopen(k.strip())\n\n def display_frame():\n k = (\n \"powermt display ports | awk '{{print $1}}' | egrep '^[A-Z]+{2}[0-9]|[0-9]' | sort -u|xargs\"\n )\n return SubprocessPopen(k.strip())\n\n def display_memory():\n k = (\n \"egrep MemTotal /proc/meminfo | awk -F':' '{{print $2}}' | awk '{{print int($1/1024)}}'\"\n )\n return SubprocessPopen(k) + ' MB'\n\n def display_cpu():\n k = \"\"\"model=$(lscpu | egrep ^'Model name' | awk -F\\\\: '{{print$2}}')\nsocket=$(lscpu | egrep ^'Socket' | awk -F\\\\: '{{print$2}}')\ncpu=$(lscpu | egrep ^'CPU\\\\(' | awk -F\\\\: '{{print$2}}')\ncore=$(lscpu | egrep ^'Core' | awk -F\\\\: '{{print$2}}')\necho $model / $socket Socket\\\\(s\\\\) / $cpu CPU\\\\(s\\\\) / $core Core\\\\(s\\\\) per Socket\"\"\"\n return SubprocessPopen(k)\n\n def display_cluster():\n k = \"/opt/VRTSvcs/bin/haclus -state | awk '{{print $1}}' | tail -n1\"\n return SubprocessPopen(k)\n\n def display_clusternodes():\n k = '/opt/VRTSvcs/bin/hasys -list'\n return SubprocessPopen(k)\n\n def display_db():\n k = (\n \"ps -ef | grep pmon | awk -F\\\\_ '{{print $3}}' | egrep -v '^$|\\\\+ASM'\"\n )\n return SubprocessPopen(k)\n print(\n \"\"\"server_name: {0:s} \nserver_release: {1:s} \nserver_site: {2:s} \nserver_vendor: {3:s} \nserver_model: {4:s} \nserver_serial: {5:s} \nserver_cpu: {6:s} \nserver_memory: {7:s} \nserver_ip: {8:s} \nserver_cluster: {9:s} \nserver_clusternodes: {10:s} \nserver_frame: {11:s} \nserver_wwpn: {12:s} \nserver_db: {13:s}\"\"\"\n .format(display_hostname(), display_release(), display_site(),\n display_hw_vendor(), display_hw_model(), display_hw_serialnumber(),\n display_cpu(), display_memory(), display_ipaddr(), display_cluster(\n ), display_clusternodes(), display_frame(), display_fc_wwpn(),\n display_db()))\n hadouken = {'server_name': display_hostname(), 'server_release':\n display_release(), 'server_site': display_site(), 'server_vendor':\n display_hw_vendor(), 'server_model': display_hw_model(),\n 'server_serial': display_hw_serialnumber(), 'server_cpu':\n display_cpu(), 'server_memory': display_memory(), 'server_ip':\n display_ipaddr(), 'server_cluster': 
display_cluster(),\n 'server_clusternodes': display_clusternodes(), 'server_frame':\n display_frame(), 'server_wwpn': display_fc_wwpn(), 'server_db':\n display_db()}\n hadouken_file = '/var/tmp/%s.json' % display_hostname()\n fp = open(hadouken_file, 'w')\n json.dump(hadouken, fp)\nelse:\n print('OS not supported.')\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport os\nimport platform\nimport subprocess\n\n# try to import json module, if got an error use simplejson instead of json.\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n\n# if your server uses fqdn, you can suppress the domain, just change the bellow variable to your domain.\nmy_domain = 'localdomain'\n\n# checks if operating system is Linux.\nif platform.system() == 'Linux':\n # subprocess funciton, pass a operation system command as k variable.\n def SubprocessPopen(k):\n devnull = open(os.devnull, 'w')\n proc = subprocess.Popen([k], stdout=subprocess.PIPE, shell=True, stderr=devnull)\n x = proc.communicate()[0]\n return x.strip()\n\n # display hostname\n def display_hostname():\n x = platform.node()\n return x.replace(my_domain, '').replace('.', '').lower()\n\n # in my case the first 3 letters of the hostname indicates the site location, change if you want.\n def display_site():\n sites = ('SNE', 'RJO', 'BFC')\n x = platform.node()\n site = x.upper()[:3]\n if site in sites:\n return site\n else:\n return ''\n\n # display operation system release.\n def display_release():\n k = \"lsb_release -d | awk -F':' '{{print $2}}'\"\n return (SubprocessPopen(k.strip()))\n\n # display the hardware serial number.\n def display_hw_serialnumber():\n k = \"dmidecode -s system-serial-number | egrep -v '^#'\"\n return (SubprocessPopen(k.strip()))\n\n # display hardware vendor.\n def display_hw_vendor():\n k = \"dmidecode -s system-manufacturer | egrep -v '^#'\"\n return (SubprocessPopen(k.strip()))\n\n # display hardware model.\n def display_hw_model():\n k = \"dmidecode -s system-product-name | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n # display fibre channel id wwpn.\n def display_fc_wwpn():\n k = \"cat /sys/class/fc_host/host*/port_name|xargs\"\n return SubprocessPopen(k.strip().replace('0x', ''))\n\n # display ipv4 address.\n def display_ipaddr():\n k = \"ip addr show | egrep inet | awk '{{print $2}}' | awk -F'/' '{{print $1}}' | egrep -v '^127|::'|xargs\"\n return SubprocessPopen(k.strip())\n\n # display EMC storage id.\n def display_frame():\n k = \"powermt display ports | awk '{{print $1}}' | egrep '^[A-Z]+{2}[0-9]|[0-9]' | sort -u|xargs\"\n return SubprocessPopen(k.strip())\n\n # display total memory in MB.\n def display_memory():\n k = \"egrep MemTotal /proc/meminfo | awk -F':' '{{print $2}}' | awk '{{print int($1/1024)}}'\"\n return SubprocessPopen(k) + \" MB\"\n\n # display cpu info, physical and cores.\n def display_cpu():\n k = \"model=$(lscpu | egrep ^'Model name' | awk -F\\: '{{print$2}}')\\n\" \\\n \"socket=$(lscpu | egrep ^'Socket' | awk -F\\: '{{print$2}}')\\n\" \\\n \"cpu=$(lscpu | egrep ^'CPU\\(' | awk -F\\: '{{print$2}}')\\n\" \\\n \"core=$(lscpu | egrep ^'Core' | awk -F\\: '{{print$2}}')\\n\" \\\n \"echo \"\"$model / $socket Socket\\\\(s\\\\) / $cpu CPU\\\\(s\\\\) / $core Core\\\\(s\\\\) per Socket\"\"\"\n return SubprocessPopen(k)\n\n # display information about Veritas InforScale and Cluster Server.\n def display_cluster():\n k = \"/opt/VRTSvcs/bin/haclus -state | awk '{{print $1}}' | tail -n1\"\n return SubprocessPopen(k)\n\n # display the list of cluster nodes.\n def display_clusternodes():\n k = \"/opt/VRTSvcs/bin/hasys -list\"\n return SubprocessPopen(k)\n\n # display the name of Oracle instances.\n def display_db():\n k = \"ps -ef | grep pmon | awk -F\\_ '{{print $3}}' | egrep -v '^$|\\+ASM'\"\n return SubprocessPopen(k)\n\n # print all information on the screen.\n print(\n 
\"server_name: {0:s} \\n\"\n \"server_release: {1:s} \\n\"\n \"server_site: {2:s} \\n\"\n \"server_vendor: {3:s} \\n\"\n \"server_model: {4:s} \\n\"\n \"server_serial: {5:s} \\n\"\n \"server_cpu: {6:s} \\n\"\n \"server_memory: {7:s} \\n\"\n \"server_ip: {8:s} \\n\"\n \"server_cluster: {9:s} \\n\"\n \"server_clusternodes: {10:s} \\n\"\n \"server_frame: {11:s} \\n\"\n \"server_wwpn: {12:s} \\n\"\n \"server_db: {13:s}\".format(display_hostname(), display_release(), display_site(), display_hw_vendor(), display_hw_model(),\n display_hw_serialnumber(),\n display_cpu(), display_memory(), display_ipaddr(), display_cluster(), display_clusternodes(),\n display_frame(),\n display_fc_wwpn(), display_db()))\n\n # create a dict to export info to sqlite db.\n hadouken = {'server_name': display_hostname(), 'server_release': display_release(), 'server_site': display_site(),\n 'server_vendor': display_hw_vendor(), 'server_model': display_hw_model(),\n 'server_serial': display_hw_serialnumber(), 'server_cpu': display_cpu(), 'server_memory': display_memory(),\n 'server_ip': display_ipaddr(), 'server_cluster': display_cluster(), 'server_clusternodes': display_clusternodes(),\n 'server_frame': display_frame(), 'server_wwpn': display_fc_wwpn(), 'server_db': display_db()}\n\n # export hadouken info to be loaded into sqlite3 using db.py..\n hadouken_file = '/var/tmp/%s.json' % display_hostname()\n fp = open(hadouken_file, 'w')\n json.dump(hadouken, fp)\n\nelse:\n # if the operation system is not Linux, sorry.\n print(\"OS not supported.\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from urllib.parse import quote
from top_model import db
from top_model.ext.flask import FlaskTopModel
from top_model.filesystem import ProductPhotoCIP
from top_model.webstore import Product, Labo
from unrest import UnRest
class Hydra(FlaskTopModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config['CLIENT_ID'] = 4
self.config['BASE_IMAGE_URL'] = (
'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}')
self.config['SQLALCHEMY_DATABASE_URI'] = (
'pgfdw://hydra@localhost/hydra')
self.config.from_envvar('MEDBOX_SETTINGS', silent=True)
self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])
def filter_query(query):
return query.filter_by(client_id=app.config['CLIENT_ID'])
app = Hydra(__name__)
rest = UnRest(app, db.session)
rest(Labo, only=('label',))
product_api = rest(Product, query=filter_query, only=(
'product_id', 'title', 'description', 'cip', 'resip_labo_code',
'type_product'))
image_api = rest(ProductPhotoCIP, only=('cip', 'name', 'ext'))
@image_api.declare('GET')
def get_image(payload, cip, name, ext):
result = image_api.get(payload, cip=cip)
for obj in getattr(result, 'data', result)['objects']:
obj['name'] = quote(obj['name'])
obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)
return result
@product_api.declare('GET')
def get_product(payload, product_id):
products = (
Product.query
.filter_by(cip=str(product_id))
.filter_by(client_id=app.config['CLIENT_ID'])
.all())
if products:
return product_api.get(payload, product_id=products[0].product_id)
else:
return {'objects': [], 'occurences': 0}
|
normal
|
{
"blob_id": "de3a4053b5b0d4d2d5c2dcd317e64cf9b4faeb75",
"index": 562,
"step-1": "<mask token>\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\n<mask token>\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id\n =app.config['CLIENT_ID']).all()\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"step-2": "<mask token>\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\n<mask token>\nrest(Labo, only=('label',))\n<mask token>\n\n\n@image_api.declare('GET')\ndef get_image(payload, cip, name, ext):\n result = image_api.get(payload, cip=cip)\n for obj in getattr(result, 'data', result)['objects']:\n obj['name'] = quote(obj['name'])\n obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)\n return result\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id\n =app.config['CLIENT_ID']).all()\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"step-3": "<mask token>\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\napp = Hydra(__name__)\nrest = UnRest(app, db.session)\nrest(Labo, only=('label',))\nproduct_api = rest(Product, query=filter_query, only=('product_id', 'title',\n 'description', 'cip', 'resip_labo_code', 'type_product'))\nimage_api = rest(ProductPhotoCIP, only=('cip', 'name', 'ext'))\n\n\n@image_api.declare('GET')\ndef get_image(payload, cip, name, ext):\n result = image_api.get(payload, cip=cip)\n for obj in getattr(result, 'data', result)['objects']:\n obj['name'] = quote(obj['name'])\n obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)\n return result\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id\n =app.config['CLIENT_ID']).all()\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"step-4": "from urllib.parse import quote\nfrom top_model import db\nfrom top_model.ext.flask import FlaskTopModel\nfrom top_model.filesystem import ProductPhotoCIP\nfrom top_model.webstore import Product, Labo\nfrom unrest import UnRest\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\napp = Hydra(__name__)\nrest = UnRest(app, db.session)\nrest(Labo, only=('label',))\nproduct_api = rest(Product, query=filter_query, only=('product_id', 'title',\n 'description', 'cip', 'resip_labo_code', 'type_product'))\nimage_api = rest(ProductPhotoCIP, only=('cip', 'name', 'ext'))\n\n\n@image_api.declare('GET')\ndef get_image(payload, cip, name, ext):\n result = image_api.get(payload, cip=cip)\n for obj in getattr(result, 'data', result)['objects']:\n obj['name'] = quote(obj['name'])\n obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)\n return result\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id\n =app.config['CLIENT_ID']).all()\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"step-5": "from urllib.parse import quote\n\nfrom top_model import db\nfrom top_model.ext.flask import FlaskTopModel\nfrom top_model.filesystem import ProductPhotoCIP\nfrom top_model.webstore import Product, Labo\nfrom unrest import UnRest\n\n\nclass Hydra(FlaskTopModel):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'] = (\n 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}')\n self.config['SQLALCHEMY_DATABASE_URI'] = (\n 'pgfdw://hydra@localhost/hydra')\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\napp = Hydra(__name__)\n\n\nrest = UnRest(app, db.session)\nrest(Labo, only=('label',))\nproduct_api = rest(Product, query=filter_query, only=(\n 'product_id', 'title', 'description', 'cip', 'resip_labo_code',\n 'type_product'))\nimage_api = rest(ProductPhotoCIP, only=('cip', 'name', 'ext'))\n\n\n@image_api.declare('GET')\ndef get_image(payload, cip, name, ext):\n result = image_api.get(payload, cip=cip)\n for obj in getattr(result, 'data', result)['objects']:\n obj['name'] = quote(obj['name'])\n obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)\n return result\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = (\n Product.query\n .filter_by(cip=str(product_id))\n .filter_by(client_id=app.config['CLIENT_ID'])\n .all())\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"step-ids": [
3,
6,
7,
8,
9
]
}
|
[
3,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.
VmIsAccessibleViaSshTestBase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.
VmIsAccessibleViaSshTestBase):
vmName = 'cernvm'
timeout = 20 * 60
sshTimeout = 5 * 60
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.
VmIsAccessibleViaSshTestBase):
vmName = 'cernvm'
timeout = 20 * 60
sshTimeout = 5 * 60
def suite():
return unittest.TestLoader().loadTestsFromTestCase(testVmIsAccessibleViaSsh
)
<|reserved_special_token_1|>
import unittest
import BasicVmLifecycleTestBase
class testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.
VmIsAccessibleViaSshTestBase):
vmName = 'cernvm'
timeout = 20 * 60
sshTimeout = 5 * 60
def suite():
return unittest.TestLoader().loadTestsFromTestCase(testVmIsAccessibleViaSsh
)
|
flexible
|
{
"blob_id": "79e4e37fc17462508abf259e3a7861bd76797280",
"index": 9182,
"step-1": "<mask token>\n\n\nclass testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.\n VmIsAccessibleViaSshTestBase):\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.\n VmIsAccessibleViaSshTestBase):\n vmName = 'cernvm'\n timeout = 20 * 60\n sshTimeout = 5 * 60\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.\n VmIsAccessibleViaSshTestBase):\n vmName = 'cernvm'\n timeout = 20 * 60\n sshTimeout = 5 * 60\n\n\ndef suite():\n return unittest.TestLoader().loadTestsFromTestCase(testVmIsAccessibleViaSsh\n )\n",
"step-4": "import unittest\nimport BasicVmLifecycleTestBase\n\n\nclass testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.\n VmIsAccessibleViaSshTestBase):\n vmName = 'cernvm'\n timeout = 20 * 60\n sshTimeout = 5 * 60\n\n\ndef suite():\n return unittest.TestLoader().loadTestsFromTestCase(testVmIsAccessibleViaSsh\n )\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cursor.execute('INSERT INTO users VALUES(?,?,?)', (1, 'ilia', 'qwerty'))
<|reserved_special_token_0|>
cursor.executemany('INSERT INTO users VALUES(?,?,?)', users)
for row in cursor.execute('SELECT * FROM users'):
print(row)
connection.commit()
connection.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
connection = sqlite3.connect('database.db')
cursor = connection.cursor()
cursor.execute('INSERT INTO users VALUES(?,?,?)', (1, 'ilia', 'qwerty'))
users = [(2, 'nika', 'asdf'), (3, 'nino', 'sdfg')]
cursor.executemany('INSERT INTO users VALUES(?,?,?)', users)
for row in cursor.execute('SELECT * FROM users'):
print(row)
connection.commit()
connection.close()
<|reserved_special_token_1|>
import sqlite3
connection = sqlite3.connect('database.db')
cursor = connection.cursor()
cursor.execute('INSERT INTO users VALUES(?,?,?)', (1, 'ilia', 'qwerty'))
users = [(2, 'nika', 'asdf'), (3, 'nino', 'sdfg')]
cursor.executemany('INSERT INTO users VALUES(?,?,?)', users)
for row in cursor.execute('SELECT * FROM users'):
print(row)
connection.commit()
connection.close()
<|reserved_special_token_1|>
import sqlite3
connection = sqlite3.connect('database.db')
cursor = connection.cursor()
# cursor.execute('CREATE TABLE users (id int, username text, password text)')
cursor.execute('INSERT INTO users VALUES(?,?,?)',(1,'ilia','qwerty'))
users = [(2,'nika','asdf'),(3,'nino','sdfg')]
cursor.executemany('INSERT INTO users VALUES(?,?,?)', users)
for row in cursor.execute('SELECT * FROM users'):
print(row)
connection.commit()
connection.close()
|
flexible
|
{
"blob_id": "d6b49533573dfefba6286ac2bffc2bd7a4075063",
"index": 1731,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncursor.execute('INSERT INTO users VALUES(?,?,?)', (1, 'ilia', 'qwerty'))\n<mask token>\ncursor.executemany('INSERT INTO users VALUES(?,?,?)', users)\nfor row in cursor.execute('SELECT * FROM users'):\n print(row)\nconnection.commit()\nconnection.close()\n",
"step-3": "<mask token>\nconnection = sqlite3.connect('database.db')\ncursor = connection.cursor()\ncursor.execute('INSERT INTO users VALUES(?,?,?)', (1, 'ilia', 'qwerty'))\nusers = [(2, 'nika', 'asdf'), (3, 'nino', 'sdfg')]\ncursor.executemany('INSERT INTO users VALUES(?,?,?)', users)\nfor row in cursor.execute('SELECT * FROM users'):\n print(row)\nconnection.commit()\nconnection.close()\n",
"step-4": "import sqlite3\nconnection = sqlite3.connect('database.db')\ncursor = connection.cursor()\ncursor.execute('INSERT INTO users VALUES(?,?,?)', (1, 'ilia', 'qwerty'))\nusers = [(2, 'nika', 'asdf'), (3, 'nino', 'sdfg')]\ncursor.executemany('INSERT INTO users VALUES(?,?,?)', users)\nfor row in cursor.execute('SELECT * FROM users'):\n print(row)\nconnection.commit()\nconnection.close()\n",
"step-5": "import sqlite3\n\nconnection = sqlite3.connect('database.db')\n\ncursor = connection.cursor()\n\n# cursor.execute('CREATE TABLE users (id int, username text, password text)')\n\ncursor.execute('INSERT INTO users VALUES(?,?,?)',(1,'ilia','qwerty'))\n\nusers = [(2,'nika','asdf'),(3,'nino','sdfg')]\n\ncursor.executemany('INSERT INTO users VALUES(?,?,?)', users)\n\nfor row in cursor.execute('SELECT * FROM users'):\n print(row)\n\nconnection.commit()\n\nconnection.close()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def test_lex_comment_no_newline():
lexer = gherkin.Lexer(' test comment')
new_state = lexer.lex_comment_metadata_value()
lexer.tokens.should.equal([(1, gherkin.TOKEN_META_VALUE, 'test comment')])
new_state.should.equal(lexer.lex_text)
def test_lex_comment_until_newline():
"""Lexer.lex_comment() Should parse comments until the newline character"""
lexer = gherkin.Lexer('# one line\n# another line')
tokens = lexer.run()
lexer.tokens.should.equal([(1, gherkin.TOKEN_COMMENT, 'one line'), (1,
gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_COMMENT,
'another line'), (2, gherkin.TOKEN_EOF, '')])
<|reserved_special_token_0|>
def test_lex_text_with_labels():
"""Lexer.run() Should be able to tokenize a feature with a scenario"""
lexer = gherkin.Lexer(
"""
Feature: Some descriptive text
In order to parse a Gherkin file
As a parser
I want to be able to parse scenarios
Even more text
Scenario: The user wants to describe a feature
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL, 'Feature'), (3,
gherkin.TOKEN_TEXT, 'Some descriptive text'), (3, gherkin.
TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_TEXT,
'In order to parse a Gherkin file'), (4, gherkin.TOKEN_NEWLINE,
'\n'), (5, gherkin.TOKEN_TEXT, 'As a parser'), (5, gherkin.
TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_TEXT,
'I want to be able to parse scenarios'), (6, gherkin.TOKEN_NEWLINE,
'\n'), (7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.TOKEN_TEXT,
'Even more text'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.
TOKEN_NEWLINE, '\n'), (10, gherkin.TOKEN_LABEL, 'Scenario'), (10,
gherkin.TOKEN_TEXT, 'The user wants to describe a feature'), (10,
gherkin.TOKEN_NEWLINE, '\n'), (11, gherkin.TOKEN_EOF, '')])
def test_lex_text_with_steps():
"""Lexer.run() Should be able to tokenize steps"""
lexer = gherkin.Lexer(
"""Feature: Feature title
feature description
Background: Some background
about the problem
Scenario: Scenario title
Given first step
When second step
Then third step
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'Feature title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'feature description'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL, 'Background'), (3,
gherkin.TOKEN_TEXT, 'Some background'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TEXT, 'about the problem'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5,
gherkin.TOKEN_TEXT, 'Scenario title'), (5, gherkin.TOKEN_NEWLINE,
'\n'), (6, gherkin.TOKEN_TEXT, 'Given first step'), (6, gherkin.
TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_TEXT, 'When second step'),
(7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.TOKEN_TEXT,
'Then third step'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.
TOKEN_EOF, '')])
def test_lex_load_languages():
"""Lexer.run() Should be able to parse different languages"""
lexer = gherkin.Lexer(
"""# language: pt-br
Funcionalidade: Interpretador para gherkin
Para escrever testes de aceitação
Como um programador
Preciso de uma ferramenta de BDD
Contexto:
Dado que a variavel "X" contém o número 2
Cenário: Lanche
Dada uma maçã
Quando mordida
Então a fome passa
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_META_LABEL, 'language'), (1,
gherkin.TOKEN_META_VALUE, 'pt-br'), (1, gherkin.TOKEN_NEWLINE, '\n'
), (2, gherkin.TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL,
'Funcionalidade'), (3, gherkin.TOKEN_TEXT,
'Interpretador para gherkin'), (3, gherkin.TOKEN_NEWLINE, '\n'), (4,
gherkin.TOKEN_TEXT, 'Para escrever testes de aceitação'), (4,
gherkin.TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_TEXT,
'Como um programador'), (5, gherkin.TOKEN_NEWLINE, '\n'), (6,
gherkin.TOKEN_TEXT, 'Preciso de uma ferramenta de BDD'), (6,
gherkin.TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_LABEL, 'Contexto'),
(7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.TOKEN_TEXT,
'Dado que a variavel "X" contém o número 2'), (8, gherkin.
TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_LABEL, 'Cenário'), (9,
gherkin.TOKEN_TEXT, 'Lanche'), (9, gherkin.TOKEN_NEWLINE, '\n'), (
10, gherkin.TOKEN_TEXT, 'Dada uma maçã'), (10, gherkin.
TOKEN_NEWLINE, '\n'), (11, gherkin.TOKEN_TEXT, 'Quando mordida'), (
11, gherkin.TOKEN_NEWLINE, '\n'), (12, gherkin.TOKEN_TEXT,
'Então a fome passa'), (12, gherkin.TOKEN_NEWLINE, '\n'), (13,
gherkin.TOKEN_EOF, '')])
def test_lex_tables():
"""Lexer.run() Should be able to lex tables"""
lexer = gherkin.Lexer(""" Examples:
| column1 | column2 | """)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Examples'), (1, gherkin.
TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_TABLE_COLUMN, 'column1'), (
2, gherkin.TOKEN_TABLE_COLUMN, 'column2'), (2, gherkin.TOKEN_EOF, '')])
<|reserved_special_token_0|>
def test_lex_tables_within_steps():
"""Lexer.run() Should be able to lex example tables from steps"""
lexer = gherkin.Lexer(
""" Feature: Check models existence
Background:
Given I have a garden in the database:
| @name | area | raining |
| Secret Garden | 45 | false |
And I have gardens in the database:
| name | area | raining |
| Octopus' Garden | 120 | true |
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'Check models existence'), (1, gherkin.TOKEN_NEWLINE,
'\n'), (2, gherkin.TOKEN_LABEL, 'Background'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL,
'Given I have a garden in the database'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TABLE_COLUMN, '@name'), (4, gherkin.
TOKEN_TABLE_COLUMN, 'area'), (4, gherkin.TOKEN_TABLE_COLUMN,
'raining'), (4, gherkin.TOKEN_NEWLINE, '\n'), (5, gherkin.
TOKEN_TABLE_COLUMN, 'Secret Garden'), (5, gherkin.
TOKEN_TABLE_COLUMN, '45'), (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),
(5, gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_LABEL,
'And I have gardens in the database'), (6, gherkin.TOKEN_NEWLINE,
'\n'), (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.
TOKEN_TABLE_COLUMN, 'area'), (7, gherkin.TOKEN_TABLE_COLUMN,
'raining'), (7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.
TOKEN_TABLE_COLUMN, "Octopus' Garden"), (8, gherkin.
TOKEN_TABLE_COLUMN, '120'), (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),
(8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_EOF, '')])
def test_lex_multi_line_str():
"""Lexer.run() Should be able to find multi quoted strings after labels"""
lexer = gherkin.Lexer(
""" Given the following email template:
'''Here we go with a pretty
big block of text
surrounded by triple quoted strings
'''
And a cat picture
""\"Now notice we didn't use (:) above
""\"
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL,
'Given the following email template'), (1, gherkin.TOKEN_NEWLINE,
'\n'), (2, gherkin.TOKEN_QUOTES, "'''"), (2, gherkin.TOKEN_TEXT,
"""Here we go with a pretty
big block of text
surrounded by triple quoted strings
"""
), (5, gherkin.TOKEN_QUOTES, "'''"), (5, gherkin.TOKEN_NEWLINE,
'\n'), (6, gherkin.TOKEN_TEXT, 'And a cat picture'), (6, gherkin.
TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_QUOTES, '"""'), (7, gherkin
.TOKEN_TEXT, """Now notice we didn't use (:) above
"""), (8,
gherkin.TOKEN_QUOTES, '"""'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9,
gherkin.TOKEN_EOF, '')])
def test_lex_tags_empty():
"""Lexer.lex_tag() Should bail if we reach EOF"""
lexer = gherkin.Lexer('')
lexer.lex_tag()
lexer.tokens.should.be.empty
def test_lex_tags():
"""Lexer.run() Should be able to find tags"""
lexer = gherkin.Lexer(
""" @tagged-feature
Feature: Parse tags
@tag1 @tag2
Scenario: Test
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_TAG, 'tagged-feature'), (1,
gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_LABEL, 'Feature'),
(2, gherkin.TOKEN_TEXT, 'Parse tags'), (2, gherkin.TOKEN_NEWLINE,
'\n'), (3, gherkin.TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_TAG,
'tag1'), (4, gherkin.TOKEN_TAG, 'tag2'), (4, gherkin.TOKEN_NEWLINE,
'\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5, gherkin.TOKEN_TEXT,
'Test'), (5, gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_EOF, '')])
def test_parse_metadata_empty():
Parser([(1, gherkin.TOKEN_EOF, '')]).parse_metadata().should.be.none
Parser([None]).parse_metadata().should.be.none
def test_parse_metadata_incomplete():
parser = Parser([(1, gherkin.TOKEN_META_LABEL, 'language'), (1, gherkin
.TOKEN_EOF, '')])
parser.parse_metadata().should.be.none
<|reserved_special_token_0|>
def test_parse_empty_title():
parser = Parser([(1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.
TOKEN_TEXT, 'more text after title')])
feature = parser.parse_title()
feature.should.be.none
<|reserved_special_token_0|>
def test_parse_background():
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Background'), (1, gherkin.
TOKEN_TEXT, 'title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin
.TOKEN_LABEL, 'Given two users in the database'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_TABLE_COLUMN, 'name'), (3,
gherkin.TOKEN_TABLE_COLUMN, 'email'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'), (4, gherkin.
TOKEN_TABLE_COLUMN, '[email protected]'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'), (
5, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'), (5,
gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_LABEL, 'Scenario')])
feature = parser.parse_background()
feature.should.equal(Ast.Background(line=1, title=Ast.Text(line=1, text
='title'), steps=[Ast.Step(line=2, title=Ast.Text(line=2, text=
'Given two users in the database'), table=Ast.Table(line=3, fields=
[['name', 'email'], ['Lincoln', '[email protected]'], ['Gabriel',
'[email protected]']]))]))
def test_parse_scenario():
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario'), (1, gherkin.
TOKEN_TEXT, 'Scenario title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'Given first step')])
feature = parser.parse_scenarios()
feature.should.equal([Ast.Scenario(line=1, title=Ast.Text(line=1, text=
'Scenario title'), steps=[Ast.Step(line=2, title=Ast.Text(line=2,
text='Given first step'))])])
def test_parse_scenario_with_description():
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario'), (1, gherkin.
TOKEN_TEXT, 'Scenario title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'Scenario description'), (2, gherkin.TOKEN_TEXT,
'More description'), (2, gherkin.TOKEN_NEWLINE, '\n'), (3, gherkin.
TOKEN_TEXT, 'Given first step')])
feature = parser.parse_scenarios()
feature.should.equal([Ast.Scenario(line=1, title=Ast.Text(line=1, text=
'Scenario title'), description=Ast.Text(line=2, text=
'Scenario description More description'), steps=[Ast.Step(line=3,
title=Ast.Text(line=3, text='Given first step'))])])
def test_parse_scenario_outline_with_examples():
""""""
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario Outline'), (1,
gherkin.TOKEN_TEXT, 'Plant a tree'), (1, gherkin.TOKEN_NEWLINE,
'\n'), (2, gherkin.TOKEN_TEXT, 'Given the <name> of a garden'), (2,
gherkin.TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_TEXT,
'When I plant a tree'), (3, gherkin.TOKEN_NEWLINE, '\n'), (4,
gherkin.TOKEN_TEXT, 'And wait for <num_days> days'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_TEXT,
'Then I see it growing'), (5, gherkin.TOKEN_NEWLINE, '\n'), (6,
gherkin.TOKEN_LABEL, 'Examples'), (6, gherkin.TOKEN_NEWLINE, '\n'),
(7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.
TOKEN_TABLE_COLUMN, 'num_days'), (7, gherkin.TOKEN_NEWLINE, '\n'),
(8, gherkin.TOKEN_TABLE_COLUMN, 'Secret'), (8, gherkin.
TOKEN_TABLE_COLUMN, '2'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9,
gherkin.TOKEN_TABLE_COLUMN, 'Octopus'), (9, gherkin.
TOKEN_TABLE_COLUMN, '5'), (9, gherkin.TOKEN_NEWLINE, '\n'), (10,
gherkin.TOKEN_EOF, '')])
scenarios = parser.parse_scenarios()
scenarios.should.equal([Ast.ScenarioOutline(line=1, title=Ast.Text(line
=1, text='Plant a tree'), steps=[Ast.Step(line=2, title=Ast.Text(
line=2, text='Given the <name> of a garden')), Ast.Step(line=3,
title=Ast.Text(line=3, text='When I plant a tree')), Ast.Step(line=
4, title=Ast.Text(line=4, text='And wait for <num_days> days')),
Ast.Step(line=5, title=Ast.Text(line=5, text=
'Then I see it growing'))], examples=Ast.Examples(line=6, table=Ast
.Table(line=7, fields=[['name', 'num_days'], ['Secret', '2'], [
'Octopus', '5']])))])
<|reserved_special_token_0|>
def test_parse_feature_two_backgrounds():
parser = gherkin.Parser(gherkin.Lexer(
"""
Feature: Feature title
feature description
Background: Some background
about the problem
Background: Some other background
will raise an exception
Scenario: Scenario title
Given first step
When second step
Then third step
"""
).run())
parser.parse_feature.when.called.should.throw(SyntaxError,
"`Background' should not be declared here, Scenario or Scenario Outline expected"
)
<|reserved_special_token_0|>
def test_parse_feature():
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'Feature title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'feature description'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL, 'Background'), (3,
gherkin.TOKEN_TEXT, 'Some background'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TEXT, 'Given the problem'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5,
gherkin.TOKEN_TEXT, 'Scenario title'), (5, gherkin.TOKEN_NEWLINE,
'\n'), (6, gherkin.TOKEN_TEXT, 'Given first step'), (6, gherkin.
TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_LABEL, 'Scenario'), (7,
gherkin.TOKEN_TEXT, 'Another scenario'), (7, gherkin.TOKEN_NEWLINE,
'\n'), (8, gherkin.TOKEN_TEXT, 'Given this step'), (8, gherkin.
TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_TEXT,
'When we take another step'), (9, gherkin.TOKEN_NEWLINE, '\n'), (10,
gherkin.TOKEN_EOF, '')])
feature = parser.parse_feature()
feature.should.equal(Ast.Feature(line=1, title=Ast.Text(line=1, text=
'Feature title'), description=Ast.Text(line=2, text=
'feature description'), background=Ast.Background(line=3, title=Ast
.Text(line=3, text='Some background'), steps=[Ast.Step(line=4,
title=Ast.Text(line=4, text='Given the problem'))]), scenarios=[Ast
.Scenario(line=5, title=Ast.Text(line=5, text='Scenario title'),
steps=[Ast.Step(line=6, title=Ast.Text(line=6, text=
'Given first step'))]), Ast.Scenario(line=7, title=Ast.Text(line=7,
text='Another scenario'), steps=[Ast.Step(line=8, title=Ast.Text(
line=8, text='Given this step')), Ast.Step(line=9, title=Ast.Text(
line=9, text='When we take another step'))])]))
def test_parse_tables_within_steps():
"""Lexer.run() Should be able to parse example tables from steps"""
"""Feature: Check models existence
Background:
Given I have a garden in the database:
| @name | area | raining |
| Secret Garden | 45 | false |
And I have gardens in the database:
| name | area | raining |
| Octopus' Garden | 120 | true |
Scenario: Plant a tree
Given the <name> of a garden
When I plant a tree
And wait for <num_days> days
Then I see it growing
"""
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'Check models existence'), (1, gherkin.TOKEN_NEWLINE,
'\n'), (2, gherkin.TOKEN_LABEL, 'Background'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL,
'Given I have a garden in the database'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TABLE_COLUMN, '@name'), (4, gherkin.
TOKEN_TABLE_COLUMN, 'area'), (4, gherkin.TOKEN_TABLE_COLUMN,
'raining'), (4, gherkin.TOKEN_NEWLINE, '\n'), (5, gherkin.
TOKEN_TABLE_COLUMN, 'Secret Garden'), (5, gherkin.
TOKEN_TABLE_COLUMN, '45'), (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),
(5, gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_LABEL,
'And I have gardens in the database'), (6, gherkin.TOKEN_NEWLINE,
'\n'), (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.
TOKEN_TABLE_COLUMN, 'area'), (7, gherkin.TOKEN_TABLE_COLUMN,
'raining'), (7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.
TOKEN_TABLE_COLUMN, "Octopus' Garden"), (8, gherkin.
TOKEN_TABLE_COLUMN, '120'), (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),
(8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_LABEL,
'Scenario'), (9, gherkin.TOKEN_TEXT, 'Plant a tree'), (9, gherkin.
TOKEN_NEWLINE, '\n'), (10, gherkin.TOKEN_TEXT,
'Given the <name> of a garden'), (10, gherkin.TOKEN_NEWLINE, '\n'),
(11, gherkin.TOKEN_TEXT, 'When I plant a tree'), (11, gherkin.
TOKEN_NEWLINE, '\n'), (12, gherkin.TOKEN_TEXT,
'And wait for <num_days> days'), (12, gherkin.TOKEN_NEWLINE, '\n'),
(13, gherkin.TOKEN_TEXT, 'Then I see it growing'), (13, gherkin.
TOKEN_NEWLINE, '\n'), (14, gherkin.TOKEN_EOF, '')])
feature = parser.parse_feature()
feature.should.equal(Ast.Feature(line=1, title=Ast.Text(line=1, text=
'Check models existence'), background=Ast.Background(line=2, steps=
[Ast.Step(line=3, title=Ast.Text(line=3, text=
'Given I have a garden in the database'), table=Ast.Table(line=4,
fields=[['@name', 'area', 'raining'], ['Secret Garden', '45',
'false']])), Ast.Step(line=6, title=Ast.Text(line=6, text=
'And I have gardens in the database'), table=Ast.Table(line=7,
fields=[['name', 'area', 'raining'], ["Octopus' Garden", '120',
'true']]))]), scenarios=[Ast.Scenario(title=Ast.Text(line=9, text=
'Plant a tree'), line=9, steps=[Ast.Step(line=10, title=Ast.Text(
line=10, text='Given the <name> of a garden')), Ast.Step(line=11,
title=Ast.Text(line=11, text='When I plant a tree')), Ast.Step(line
=12, title=Ast.Text(line=12, text='And wait for <num_days> days')),
Ast.Step(line=13, title=Ast.Text(line=13, text=
'Then I see it growing'))])]))
<|reserved_special_token_0|>
def test_parse_text():
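    """Parser.parse_tags() Should collect tags spread over several lines until a label is reached"""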
parser = Parser([(1, gherkin.TOKEN_TAG, 'tag1'), (1, gherkin.TOKEN_TAG,
'tag2'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_TAG,
'tag3'), (2, gherkin.TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL,
'Feature')])
tags = parser.parse_tags()
tags.should.equal(['tag1', 'tag2', 'tag3'])
def test_parse_tags_on_scenario_outline_examples():
"""Parser should allow tags to be defined in examples"""
parser = Parser([(1, gherkin.TOKEN_TAG, 'tagged-feature'), (1, gherkin.
TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_LABEL, 'Feature'), (2,
gherkin.TOKEN_TEXT, 'Parse tags'), (2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_TAG, 'tag1'), (3, gherkin.TOKEN_TAG, 'tag2'), (3,
gherkin.TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_LABEL,
'Scenario Outline'), (4, gherkin.TOKEN_TEXT, 'Test'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_TAG, 'example-tag1'), (5,
gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_TAG, 'example-tag2'
), (6, gherkin.TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_LABEL,
'Examples'), (7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.
TOKEN_TABLE_COLUMN, 'Header'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9,
gherkin.TOKEN_EOF, '')])
feature = parser.parse_feature()
feature.should.equal(Ast.Feature(line=2, title=Ast.Text(line=2, text=
'Parse tags'), tags=['tagged-feature'], scenarios=[Ast.
ScenarioOutline(line=4, title=Ast.Text(line=4, text='Test'), tags=[
'tag1', 'tag2'], examples=Ast.Examples(line=7, tags=['example-tag1',
'example-tag2'], table=Ast.Table(line=8, fields=[['Header']])))]))
<|reserved_special_token_0|>
def test_ast_node_equal():
n1 = Ast.Node()
n2 = Ast.Node()
n1.name = 'Lincoln'
n2.color = 'green'
equal = n1 == n2
equal.should.be.false
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_lex_test_eof():
"""lex_text() Should be able to find EOF"""
lexer = gherkin.Lexer('')
new_state = lexer.lex_text()
lexer.tokens.should.equal([(1, gherkin.TOKEN_EOF, '')])
new_state.should.be.none
def test_lex_text():
"""lex_text() Should be able to find text before EOF"""
lexer = gherkin.Lexer('some text')
new_state = lexer.lex_text()
lexer.tokens.should.equal([(1, gherkin.TOKEN_TEXT, 'some text'), (1,
gherkin.TOKEN_EOF, '')])
new_state.should.be.none
def test_lex_hash_with_text():
"""lex_text() Should stop lexing at # (we found a comment!)"""
lexer = gherkin.Lexer(' some text # random comment')
new_state = lexer.lex_text()
lexer.tokens.should.equal([(1, gherkin.TOKEN_TEXT, 'some text ')])
new_state.should.equal(lexer.lex_comment)
def test_lex_comment():
"""lex_comment() Should stop lexing at \\n"""
lexer = gherkin.Lexer(' random comment')
new_state = lexer.lex_comment()
lexer.tokens.should.equal([(1, gherkin.TOKEN_COMMENT, 'random comment')])
new_state.should.equal(lexer.lex_text)
def test_lex_comment_meta_label():
"""lex_comment() Should stop lexing at : (we found a label)"""
lexer = gherkin.Lexer(' metadata: test')
new_state = lexer.lex_comment()
lexer.tokens.should.equal([(1, gherkin.TOKEN_META_LABEL, 'metadata')])
new_state.should.equal(lexer.lex_comment_metadata_value)
<|reserved_special_token_0|>
def test_lex_comment_no_newline():
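    """Lexer.lex_comment_metadata_value() Should emit the metadata value even without a trailing newline"""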
lexer = gherkin.Lexer(' test comment')
new_state = lexer.lex_comment_metadata_value()
lexer.tokens.should.equal([(1, gherkin.TOKEN_META_VALUE, 'test comment')])
new_state.should.equal(lexer.lex_text)
def test_lex_comment_until_newline():
"""Lexer.lex_comment() Should parse comments until the newline character"""
lexer = gherkin.Lexer('# one line\n# another line')
tokens = lexer.run()
lexer.tokens.should.equal([(1, gherkin.TOKEN_COMMENT, 'one line'), (1,
gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_COMMENT,
'another line'), (2, gherkin.TOKEN_EOF, '')])
def test_lex_comment_full():
"""Lexer.run() Should be able to process metadata in comments"""
lexer = gherkin.Lexer('some text # metadata-field: blah-value\ntext')
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_TEXT, 'some text '), (1, gherkin
.TOKEN_META_LABEL, 'metadata-field'), (1, gherkin.TOKEN_META_VALUE,
'blah-value'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.
TOKEN_TEXT, 'text'), (2, gherkin.TOKEN_EOF, '')])
def test_lex_text_with_label():
"""Lexer.run() Should be able to parse a label with some text"""
lexer = gherkin.Lexer(
'Feature: A cool feature\n some more text\n even more text')
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'A cool feature'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'some more text'), (2, gherkin.TOKEN_NEWLINE,
'\n'), (3, gherkin.TOKEN_TEXT, 'even more text'), (3, gherkin.
TOKEN_EOF, '')])
def test_lex_text_with_labels():
"""Lexer.run() Should be able to tokenize a feature with a scenario"""
lexer = gherkin.Lexer(
"""
Feature: Some descriptive text
In order to parse a Gherkin file
As a parser
I want to be able to parse scenarios
Even more text
Scenario: The user wants to describe a feature
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL, 'Feature'), (3,
gherkin.TOKEN_TEXT, 'Some descriptive text'), (3, gherkin.
TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_TEXT,
'In order to parse a Gherkin file'), (4, gherkin.TOKEN_NEWLINE,
'\n'), (5, gherkin.TOKEN_TEXT, 'As a parser'), (5, gherkin.
TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_TEXT,
'I want to be able to parse scenarios'), (6, gherkin.TOKEN_NEWLINE,
'\n'), (7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.TOKEN_TEXT,
'Even more text'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.
TOKEN_NEWLINE, '\n'), (10, gherkin.TOKEN_LABEL, 'Scenario'), (10,
gherkin.TOKEN_TEXT, 'The user wants to describe a feature'), (10,
gherkin.TOKEN_NEWLINE, '\n'), (11, gherkin.TOKEN_EOF, '')])
def test_lex_text_with_steps():
"""Lexer.run() Should be able to tokenize steps"""
lexer = gherkin.Lexer(
"""Feature: Feature title
feature description
Background: Some background
about the problem
Scenario: Scenario title
Given first step
When second step
Then third step
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'Feature title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'feature description'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL, 'Background'), (3,
gherkin.TOKEN_TEXT, 'Some background'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TEXT, 'about the problem'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5,
gherkin.TOKEN_TEXT, 'Scenario title'), (5, gherkin.TOKEN_NEWLINE,
'\n'), (6, gherkin.TOKEN_TEXT, 'Given first step'), (6, gherkin.
TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_TEXT, 'When second step'),
(7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.TOKEN_TEXT,
'Then third step'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.
TOKEN_EOF, '')])
def test_lex_load_languages():
"""Lexer.run() Should be able to parse different languages"""
lexer = gherkin.Lexer(
"""# language: pt-br
Funcionalidade: Interpretador para gherkin
Para escrever testes de aceitação
Como um programador
Preciso de uma ferramenta de BDD
Contexto:
Dado que a variavel "X" contém o número 2
Cenário: Lanche
Dada uma maçã
Quando mordida
Então a fome passa
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_META_LABEL, 'language'), (1,
gherkin.TOKEN_META_VALUE, 'pt-br'), (1, gherkin.TOKEN_NEWLINE, '\n'
), (2, gherkin.TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL,
'Funcionalidade'), (3, gherkin.TOKEN_TEXT,
'Interpretador para gherkin'), (3, gherkin.TOKEN_NEWLINE, '\n'), (4,
gherkin.TOKEN_TEXT, 'Para escrever testes de aceitação'), (4,
gherkin.TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_TEXT,
'Como um programador'), (5, gherkin.TOKEN_NEWLINE, '\n'), (6,
gherkin.TOKEN_TEXT, 'Preciso de uma ferramenta de BDD'), (6,
gherkin.TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_LABEL, 'Contexto'),
(7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.TOKEN_TEXT,
'Dado que a variavel "X" contém o número 2'), (8, gherkin.
TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_LABEL, 'Cenário'), (9,
gherkin.TOKEN_TEXT, 'Lanche'), (9, gherkin.TOKEN_NEWLINE, '\n'), (
10, gherkin.TOKEN_TEXT, 'Dada uma maçã'), (10, gherkin.
TOKEN_NEWLINE, '\n'), (11, gherkin.TOKEN_TEXT, 'Quando mordida'), (
11, gherkin.TOKEN_NEWLINE, '\n'), (12, gherkin.TOKEN_TEXT,
'Então a fome passa'), (12, gherkin.TOKEN_NEWLINE, '\n'), (13,
gherkin.TOKEN_EOF, '')])
def test_lex_tables():
"""Lexer.run() Should be able to lex tables"""
lexer = gherkin.Lexer(""" Examples:
| column1 | column2 | """)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Examples'), (1, gherkin.
TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_TABLE_COLUMN, 'column1'), (
2, gherkin.TOKEN_TABLE_COLUMN, 'column2'), (2, gherkin.TOKEN_EOF, '')])
def test_lex_tables_full():
"""Lexer.run() Should be able to lex scenario outlines"""
lexer = gherkin.Lexer(
""" Feature: gherkin has steps with examples
Scenario Outline: Add two numbers
Given I have <input_1> and <input_2> the calculator
When I press "Sum"!
Then the result should be <output> on the screen
Examples:
| input_1 | input_2 | output |
| 20 | 30 | 50 |
| 0 | 40 | 40 |
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'gherkin has steps with examples'), (1, gherkin.
TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_LABEL, 'Scenario Outline'),
(2, gherkin.TOKEN_TEXT, 'Add two numbers'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_TEXT,
'Given I have <input_1> and <input_2> the calculator'), (3, gherkin
.TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_TEXT,
'When I press "Sum"!'), (4, gherkin.TOKEN_NEWLINE, '\n'), (5,
gherkin.TOKEN_TEXT,
'Then the result should be <output> on the screen'), (5, gherkin.
TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_LABEL, 'Examples'), (6,
gherkin.TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_TABLE_COLUMN,
'input_1'), (7, gherkin.TOKEN_TABLE_COLUMN, 'input_2'), (7, gherkin
.TOKEN_TABLE_COLUMN, 'output'), (7, gherkin.TOKEN_NEWLINE, '\n'), (
8, gherkin.TOKEN_TABLE_COLUMN, '20'), (8, gherkin.
TOKEN_TABLE_COLUMN, '30'), (8, gherkin.TOKEN_TABLE_COLUMN, '50'), (
8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_TABLE_COLUMN,
'0'), (9, gherkin.TOKEN_TABLE_COLUMN, '40'), (9, gherkin.
TOKEN_TABLE_COLUMN, '40'), (9, gherkin.TOKEN_NEWLINE, '\n'), (10,
gherkin.TOKEN_EOF, '')])
def test_lex_tables_within_steps():
"""Lexer.run() Should be able to lex example tables from steps"""
lexer = gherkin.Lexer(
""" Feature: Check models existence
Background:
Given I have a garden in the database:
| @name | area | raining |
| Secret Garden | 45 | false |
And I have gardens in the database:
| name | area | raining |
| Octopus' Garden | 120 | true |
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'Check models existence'), (1, gherkin.TOKEN_NEWLINE,
'\n'), (2, gherkin.TOKEN_LABEL, 'Background'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL,
'Given I have a garden in the database'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TABLE_COLUMN, '@name'), (4, gherkin.
TOKEN_TABLE_COLUMN, 'area'), (4, gherkin.TOKEN_TABLE_COLUMN,
'raining'), (4, gherkin.TOKEN_NEWLINE, '\n'), (5, gherkin.
TOKEN_TABLE_COLUMN, 'Secret Garden'), (5, gherkin.
TOKEN_TABLE_COLUMN, '45'), (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),
(5, gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_LABEL,
'And I have gardens in the database'), (6, gherkin.TOKEN_NEWLINE,
'\n'), (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.
TOKEN_TABLE_COLUMN, 'area'), (7, gherkin.TOKEN_TABLE_COLUMN,
'raining'), (7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.
TOKEN_TABLE_COLUMN, "Octopus' Garden"), (8, gherkin.
TOKEN_TABLE_COLUMN, '120'), (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),
(8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_EOF, '')])
def test_lex_multi_line_str():
"""Lexer.run() Should be able to find multi quoted strings after labels"""
lexer = gherkin.Lexer(
""" Given the following email template:
'''Here we go with a pretty
big block of text
surrounded by triple quoted strings
'''
And a cat picture
""\"Now notice we didn't use (:) above
""\"
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL,
'Given the following email template'), (1, gherkin.TOKEN_NEWLINE,
'\n'), (2, gherkin.TOKEN_QUOTES, "'''"), (2, gherkin.TOKEN_TEXT,
"""Here we go with a pretty
big block of text
surrounded by triple quoted strings
"""
), (5, gherkin.TOKEN_QUOTES, "'''"), (5, gherkin.TOKEN_NEWLINE,
'\n'), (6, gherkin.TOKEN_TEXT, 'And a cat picture'), (6, gherkin.
TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_QUOTES, '"""'), (7, gherkin
.TOKEN_TEXT, """Now notice we didn't use (:) above
"""), (8,
gherkin.TOKEN_QUOTES, '"""'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9,
gherkin.TOKEN_EOF, '')])
def test_lex_tags_empty():
"""Lexer.lex_tag() Should bail if we reach EOF"""
lexer = gherkin.Lexer('')
lexer.lex_tag()
lexer.tokens.should.be.empty
def test_lex_tags():
"""Lexer.run() Should be able to find tags"""
lexer = gherkin.Lexer(
""" @tagged-feature
Feature: Parse tags
@tag1 @tag2
Scenario: Test
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_TAG, 'tagged-feature'), (1,
gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_LABEL, 'Feature'),
(2, gherkin.TOKEN_TEXT, 'Parse tags'), (2, gherkin.TOKEN_NEWLINE,
'\n'), (3, gherkin.TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_TAG,
'tag1'), (4, gherkin.TOKEN_TAG, 'tag2'), (4, gherkin.TOKEN_NEWLINE,
'\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5, gherkin.TOKEN_TEXT,
'Test'), (5, gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_EOF, '')])
def test_parse_metadata_empty():
Parser([(1, gherkin.TOKEN_EOF, '')]).parse_metadata().should.be.none
Parser([None]).parse_metadata().should.be.none
def test_parse_metadata_incomplete():
parser = Parser([(1, gherkin.TOKEN_META_LABEL, 'language'), (1, gherkin
.TOKEN_EOF, '')])
parser.parse_metadata().should.be.none
def test_parse_metadata_syntax_error():
parser = Parser([(1, gherkin.TOKEN_META_LABEL, 'language'), (1, gherkin
.TOKEN_TEXT, 'pt-br')])
parser.parse_metadata.when.called.should.throw(SyntaxError,
"No value found for the meta-field `language'")
<|reserved_special_token_0|>
def test_parse_empty_title():
parser = Parser([(1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.
TOKEN_TEXT, 'more text after title')])
feature = parser.parse_title()
feature.should.be.none
def test_parse_title():
parser = Parser([(1, gherkin.TOKEN_TEXT, 'Scenario title'), (1, gherkin
.TOKEN_NEWLINE, '\n')])
feature = parser.parse_title()
feature.should.equal(Ast.Text(line=1, text='Scenario title'))
def test_parse_table():
parser = Parser([(1, gherkin.TOKEN_TABLE_COLUMN, 'name'), (1, gherkin.
TOKEN_TABLE_COLUMN, 'email'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'), (2, gherkin.
TOKEN_TABLE_COLUMN, '[email protected]'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'), (
3, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'), (3,
gherkin.TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_LABEL, 'Scenario'),
(4, gherkin.TOKEN_EOF, '')])
feature = parser.parse_table()
feature.should.equal(Ast.Table(line=1, fields=[['name', 'email'], [
'Lincoln', '[email protected]'], ['Gabriel',
'[email protected]']]))
def test_parse_background():
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Background'), (1, gherkin.
TOKEN_TEXT, 'title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin
.TOKEN_LABEL, 'Given two users in the database'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_TABLE_COLUMN, 'name'), (3,
gherkin.TOKEN_TABLE_COLUMN, 'email'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'), (4, gherkin.
TOKEN_TABLE_COLUMN, '[email protected]'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'), (
5, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'), (5,
gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_LABEL, 'Scenario')])
feature = parser.parse_background()
feature.should.equal(Ast.Background(line=1, title=Ast.Text(line=1, text
='title'), steps=[Ast.Step(line=2, title=Ast.Text(line=2, text=
'Given two users in the database'), table=Ast.Table(line=3, fields=
[['name', 'email'], ['Lincoln', '[email protected]'], ['Gabriel',
'[email protected]']]))]))
def test_parse_scenario():
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario'), (1, gherkin.
TOKEN_TEXT, 'Scenario title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'Given first step')])
feature = parser.parse_scenarios()
feature.should.equal([Ast.Scenario(line=1, title=Ast.Text(line=1, text=
'Scenario title'), steps=[Ast.Step(line=2, title=Ast.Text(line=2,
text='Given first step'))])])
def test_parse_scenario_with_description():
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario'), (1, gherkin.
TOKEN_TEXT, 'Scenario title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'Scenario description'), (2, gherkin.TOKEN_TEXT,
'More description'), (2, gherkin.TOKEN_NEWLINE, '\n'), (3, gherkin.
TOKEN_TEXT, 'Given first step')])
feature = parser.parse_scenarios()
feature.should.equal([Ast.Scenario(line=1, title=Ast.Text(line=1, text=
'Scenario title'), description=Ast.Text(line=2, text=
'Scenario description More description'), steps=[Ast.Step(line=3,
title=Ast.Text(line=3, text='Given first step'))])])
def test_parse_scenario_outline_with_examples():
""""""
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario Outline'), (1,
gherkin.TOKEN_TEXT, 'Plant a tree'), (1, gherkin.TOKEN_NEWLINE,
'\n'), (2, gherkin.TOKEN_TEXT, 'Given the <name> of a garden'), (2,
gherkin.TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_TEXT,
'When I plant a tree'), (3, gherkin.TOKEN_NEWLINE, '\n'), (4,
gherkin.TOKEN_TEXT, 'And wait for <num_days> days'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_TEXT,
'Then I see it growing'), (5, gherkin.TOKEN_NEWLINE, '\n'), (6,
gherkin.TOKEN_LABEL, 'Examples'), (6, gherkin.TOKEN_NEWLINE, '\n'),
(7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.
TOKEN_TABLE_COLUMN, 'num_days'), (7, gherkin.TOKEN_NEWLINE, '\n'),
(8, gherkin.TOKEN_TABLE_COLUMN, 'Secret'), (8, gherkin.
TOKEN_TABLE_COLUMN, '2'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9,
gherkin.TOKEN_TABLE_COLUMN, 'Octopus'), (9, gherkin.
TOKEN_TABLE_COLUMN, '5'), (9, gherkin.TOKEN_NEWLINE, '\n'), (10,
gherkin.TOKEN_EOF, '')])
scenarios = parser.parse_scenarios()
scenarios.should.equal([Ast.ScenarioOutline(line=1, title=Ast.Text(line
=1, text='Plant a tree'), steps=[Ast.Step(line=2, title=Ast.Text(
line=2, text='Given the <name> of a garden')), Ast.Step(line=3,
title=Ast.Text(line=3, text='When I plant a tree')), Ast.Step(line=
4, title=Ast.Text(line=4, text='And wait for <num_days> days')),
Ast.Step(line=5, title=Ast.Text(line=5, text=
'Then I see it growing'))], examples=Ast.Examples(line=6, table=Ast
.Table(line=7, fields=[['name', 'num_days'], ['Secret', '2'], [
'Octopus', '5']])))])
<|reserved_special_token_0|>
def test_parse_feature_two_backgrounds():
parser = gherkin.Parser(gherkin.Lexer(
"""
Feature: Feature title
feature description
Background: Some background
about the problem
Background: Some other background
will raise an exception
Scenario: Scenario title
Given first step
When second step
Then third step
"""
).run())
parser.parse_feature.when.called.should.throw(SyntaxError,
"`Background' should not be declared here, Scenario or Scenario Outline expected"
)
def test_parse_feature_background_wrong_place():
parser = gherkin.Parser(gherkin.Lexer(
"""
Feature: Feature title
feature description
Scenario: Scenario title
Given first step
When second step
Then third step
Background: Some background
about the problem
"""
).run())
parser.parse_feature.when.called.should.throw(SyntaxError,
"`Background' should not be declared here, Scenario or Scenario Outline expected"
)
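# Illustrative round-trip sketch, not part of the original suite: the two error
# tests above already feed Lexer.run() output straight into Parser, so this is
# the assumed happy-path counterpart. It assumes parse_feature() accepts that
# token stream unchanged and that the returned Ast.Feature carries the same
# `title` attribute used in the expectations throughout these tests.
def test_parse_feature_round_trip():
    parser = gherkin.Parser(gherkin.Lexer(
        """Feature: Quick check
    Scenario: Single scenario
        Given a lonely step
    """
    ).run())
    feature = parser.parse_feature()
    feature.title.should.equal(Ast.Text(line=1, text='Quick check'))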
def test_parse_feature():
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'Feature title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'feature description'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL, 'Background'), (3,
gherkin.TOKEN_TEXT, 'Some background'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TEXT, 'Given the problem'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5,
gherkin.TOKEN_TEXT, 'Scenario title'), (5, gherkin.TOKEN_NEWLINE,
'\n'), (6, gherkin.TOKEN_TEXT, 'Given first step'), (6, gherkin.
TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_LABEL, 'Scenario'), (7,
gherkin.TOKEN_TEXT, 'Another scenario'), (7, gherkin.TOKEN_NEWLINE,
'\n'), (8, gherkin.TOKEN_TEXT, 'Given this step'), (8, gherkin.
TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_TEXT,
'When we take another step'), (9, gherkin.TOKEN_NEWLINE, '\n'), (10,
gherkin.TOKEN_EOF, '')])
feature = parser.parse_feature()
feature.should.equal(Ast.Feature(line=1, title=Ast.Text(line=1, text=
'Feature title'), description=Ast.Text(line=2, text=
'feature description'), background=Ast.Background(line=3, title=Ast
.Text(line=3, text='Some background'), steps=[Ast.Step(line=4,
title=Ast.Text(line=4, text='Given the problem'))]), scenarios=[Ast
.Scenario(line=5, title=Ast.Text(line=5, text='Scenario title'),
steps=[Ast.Step(line=6, title=Ast.Text(line=6, text=
'Given first step'))]), Ast.Scenario(line=7, title=Ast.Text(line=7,
text='Another scenario'), steps=[Ast.Step(line=8, title=Ast.Text(
line=8, text='Given this step')), Ast.Step(line=9, title=Ast.Text(
line=9, text='When we take another step'))])]))
def test_parse_tables_within_steps():
"""Lexer.run() Should be able to parse example tables from steps"""
"""Feature: Check models existence
Background:
Given I have a garden in the database:
| @name | area | raining |
| Secret Garden | 45 | false |
And I have gardens in the database:
| name | area | raining |
| Octopus' Garden | 120 | true |
Scenario: Plant a tree
Given the <name> of a garden
When I plant a tree
And wait for <num_days> days
Then I see it growing
"""
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'Check models existence'), (1, gherkin.TOKEN_NEWLINE,
'\n'), (2, gherkin.TOKEN_LABEL, 'Background'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL,
'Given I have a garden in the database'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TABLE_COLUMN, '@name'), (4, gherkin.
TOKEN_TABLE_COLUMN, 'area'), (4, gherkin.TOKEN_TABLE_COLUMN,
'raining'), (4, gherkin.TOKEN_NEWLINE, '\n'), (5, gherkin.
TOKEN_TABLE_COLUMN, 'Secret Garden'), (5, gherkin.
TOKEN_TABLE_COLUMN, '45'), (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),
(5, gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_LABEL,
'And I have gardens in the database'), (6, gherkin.TOKEN_NEWLINE,
'\n'), (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.
TOKEN_TABLE_COLUMN, 'area'), (7, gherkin.TOKEN_TABLE_COLUMN,
'raining'), (7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.
TOKEN_TABLE_COLUMN, "Octopus' Garden"), (8, gherkin.
TOKEN_TABLE_COLUMN, '120'), (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),
(8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_LABEL,
'Scenario'), (9, gherkin.TOKEN_TEXT, 'Plant a tree'), (9, gherkin.
TOKEN_NEWLINE, '\n'), (10, gherkin.TOKEN_TEXT,
'Given the <name> of a garden'), (10, gherkin.TOKEN_NEWLINE, '\n'),
(11, gherkin.TOKEN_TEXT, 'When I plant a tree'), (11, gherkin.
TOKEN_NEWLINE, '\n'), (12, gherkin.TOKEN_TEXT,
'And wait for <num_days> days'), (12, gherkin.TOKEN_NEWLINE, '\n'),
(13, gherkin.TOKEN_TEXT, 'Then I see it growing'), (13, gherkin.
TOKEN_NEWLINE, '\n'), (14, gherkin.TOKEN_EOF, '')])
feature = parser.parse_feature()
feature.should.equal(Ast.Feature(line=1, title=Ast.Text(line=1, text=
'Check models existence'), background=Ast.Background(line=2, steps=
[Ast.Step(line=3, title=Ast.Text(line=3, text=
'Given I have a garden in the database'), table=Ast.Table(line=4,
fields=[['@name', 'area', 'raining'], ['Secret Garden', '45',
'false']])), Ast.Step(line=6, title=Ast.Text(line=6, text=
'And I have gardens in the database'), table=Ast.Table(line=7,
fields=[['name', 'area', 'raining'], ["Octopus' Garden", '120',
'true']]))]), scenarios=[Ast.Scenario(title=Ast.Text(line=9, text=
'Plant a tree'), line=9, steps=[Ast.Step(line=10, title=Ast.Text(
line=10, text='Given the <name> of a garden')), Ast.Step(line=11,
title=Ast.Text(line=11, text='When I plant a tree')), Ast.Step(line
=12, title=Ast.Text(line=12, text='And wait for <num_days> days')),
Ast.Step(line=13, title=Ast.Text(line=13, text=
'Then I see it growing'))])]))
<|reserved_special_token_0|>
def test_parse_text():
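    """Parser.parse_tags() Should collect tags spread over several lines until a label is reached"""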
parser = Parser([(1, gherkin.TOKEN_TAG, 'tag1'), (1, gherkin.TOKEN_TAG,
'tag2'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_TAG,
'tag3'), (2, gherkin.TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL,
'Feature')])
tags = parser.parse_tags()
tags.should.equal(['tag1', 'tag2', 'tag3'])
def test_parse_tags_on_scenario_outline_examples():
"""Parser should allow tags to be defined in examples"""
parser = Parser([(1, gherkin.TOKEN_TAG, 'tagged-feature'), (1, gherkin.
TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_LABEL, 'Feature'), (2,
gherkin.TOKEN_TEXT, 'Parse tags'), (2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_TAG, 'tag1'), (3, gherkin.TOKEN_TAG, 'tag2'), (3,
gherkin.TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_LABEL,
'Scenario Outline'), (4, gherkin.TOKEN_TEXT, 'Test'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_TAG, 'example-tag1'), (5,
gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_TAG, 'example-tag2'
), (6, gherkin.TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_LABEL,
'Examples'), (7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.
TOKEN_TABLE_COLUMN, 'Header'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9,
gherkin.TOKEN_EOF, '')])
feature = parser.parse_feature()
feature.should.equal(Ast.Feature(line=2, title=Ast.Text(line=2, text=
'Parse tags'), tags=['tagged-feature'], scenarios=[Ast.
ScenarioOutline(line=4, title=Ast.Text(line=4, text='Test'), tags=[
'tag1', 'tag2'], examples=Ast.Examples(line=7, tags=['example-tag1',
'example-tag2'], table=Ast.Table(line=8, fields=[['Header']])))]))
<|reserved_special_token_0|>
def test_ast_node_equal():
n1 = Ast.Node()
n2 = Ast.Node()
n1.name = 'Lincoln'
n2.color = 'green'
equal = n1 == n2
equal.should.be.false
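# Complementary sketch, not part of the original suite: the test above only
# covers the negative case, so this one assumes Ast.Node equality compares the
# nodes' attribute dictionaries and checks that matching attributes compare
# equal.
def test_ast_node_equal_matching_attributes():
    n1 = Ast.Node()
    n2 = Ast.Node()
    n1.name = 'Lincoln'
    n2.name = 'Lincoln'
    equal = n1 == n2
    equal.should.be.true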
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_lex_test_eof():
"""lex_text() Should be able to find EOF"""
lexer = gherkin.Lexer('')
new_state = lexer.lex_text()
lexer.tokens.should.equal([(1, gherkin.TOKEN_EOF, '')])
new_state.should.be.none
def test_lex_text():
"""lex_text() Should be able to find text before EOF"""
lexer = gherkin.Lexer('some text')
new_state = lexer.lex_text()
lexer.tokens.should.equal([(1, gherkin.TOKEN_TEXT, 'some text'), (1,
gherkin.TOKEN_EOF, '')])
new_state.should.be.none
def test_lex_hash_with_text():
"""lex_text() Should stop lexing at # (we found a comment!)"""
lexer = gherkin.Lexer(' some text # random comment')
new_state = lexer.lex_text()
lexer.tokens.should.equal([(1, gherkin.TOKEN_TEXT, 'some text ')])
new_state.should.equal(lexer.lex_comment)
def test_lex_comment():
"""lex_comment() Should stop lexing at \\n"""
lexer = gherkin.Lexer(' random comment')
new_state = lexer.lex_comment()
lexer.tokens.should.equal([(1, gherkin.TOKEN_COMMENT, 'random comment')])
new_state.should.equal(lexer.lex_text)
def test_lex_comment_meta_label():
"""lex_comment() Should stop lexing at : (we found a label)"""
lexer = gherkin.Lexer(' metadata: test')
new_state = lexer.lex_comment()
lexer.tokens.should.equal([(1, gherkin.TOKEN_META_LABEL, 'metadata')])
new_state.should.equal(lexer.lex_comment_metadata_value)
<|reserved_special_token_0|>
def test_lex_comment_no_newline():
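    """Lexer.lex_comment_metadata_value() Should emit the metadata value even without a trailing newline"""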
lexer = gherkin.Lexer(' test comment')
new_state = lexer.lex_comment_metadata_value()
lexer.tokens.should.equal([(1, gherkin.TOKEN_META_VALUE, 'test comment')])
new_state.should.equal(lexer.lex_text)
def test_lex_comment_until_newline():
"""Lexer.lex_comment() Should parse comments until the newline character"""
lexer = gherkin.Lexer('# one line\n# another line')
tokens = lexer.run()
lexer.tokens.should.equal([(1, gherkin.TOKEN_COMMENT, 'one line'), (1,
gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_COMMENT,
'another line'), (2, gherkin.TOKEN_EOF, '')])
def test_lex_comment_full():
"""Lexer.run() Should be able to process metadata in comments"""
lexer = gherkin.Lexer('some text # metadata-field: blah-value\ntext')
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_TEXT, 'some text '), (1, gherkin
.TOKEN_META_LABEL, 'metadata-field'), (1, gherkin.TOKEN_META_VALUE,
'blah-value'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.
TOKEN_TEXT, 'text'), (2, gherkin.TOKEN_EOF, '')])
def test_lex_text_with_label():
"""Lexer.run() Should be able to parse a label with some text"""
lexer = gherkin.Lexer(
'Feature: A cool feature\n some more text\n even more text')
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'A cool feature'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'some more text'), (2, gherkin.TOKEN_NEWLINE,
'\n'), (3, gherkin.TOKEN_TEXT, 'even more text'), (3, gherkin.
TOKEN_EOF, '')])
def test_lex_text_with_labels():
"""Lexer.run() Should be able to tokenize a feature with a scenario"""
lexer = gherkin.Lexer(
"""
Feature: Some descriptive text
In order to parse a Gherkin file
As a parser
I want to be able to parse scenarios
Even more text
Scenario: The user wants to describe a feature
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL, 'Feature'), (3,
gherkin.TOKEN_TEXT, 'Some descriptive text'), (3, gherkin.
TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_TEXT,
'In order to parse a Gherkin file'), (4, gherkin.TOKEN_NEWLINE,
'\n'), (5, gherkin.TOKEN_TEXT, 'As a parser'), (5, gherkin.
TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_TEXT,
'I want to be able to parse scenarios'), (6, gherkin.TOKEN_NEWLINE,
'\n'), (7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.TOKEN_TEXT,
'Even more text'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.
TOKEN_NEWLINE, '\n'), (10, gherkin.TOKEN_LABEL, 'Scenario'), (10,
gherkin.TOKEN_TEXT, 'The user wants to describe a feature'), (10,
gherkin.TOKEN_NEWLINE, '\n'), (11, gherkin.TOKEN_EOF, '')])
def test_lex_text_with_steps():
"""Lexer.run() Should be able to tokenize steps"""
lexer = gherkin.Lexer(
"""Feature: Feature title
feature description
Background: Some background
about the problem
Scenario: Scenario title
Given first step
When second step
Then third step
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'Feature title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'feature description'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL, 'Background'), (3,
gherkin.TOKEN_TEXT, 'Some background'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TEXT, 'about the problem'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5,
gherkin.TOKEN_TEXT, 'Scenario title'), (5, gherkin.TOKEN_NEWLINE,
'\n'), (6, gherkin.TOKEN_TEXT, 'Given first step'), (6, gherkin.
TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_TEXT, 'When second step'),
(7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.TOKEN_TEXT,
'Then third step'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.
TOKEN_EOF, '')])
def test_lex_load_languages():
"""Lexer.run() Should be able to parse different languages"""
lexer = gherkin.Lexer(
"""# language: pt-br
Funcionalidade: Interpretador para gherkin
Para escrever testes de aceitação
Como um programador
Preciso de uma ferramenta de BDD
Contexto:
Dado que a variavel "X" contém o número 2
Cenário: Lanche
Dada uma maçã
Quando mordida
Então a fome passa
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_META_LABEL, 'language'), (1,
gherkin.TOKEN_META_VALUE, 'pt-br'), (1, gherkin.TOKEN_NEWLINE, '\n'
), (2, gherkin.TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL,
'Funcionalidade'), (3, gherkin.TOKEN_TEXT,
'Interpretador para gherkin'), (3, gherkin.TOKEN_NEWLINE, '\n'), (4,
gherkin.TOKEN_TEXT, 'Para escrever testes de aceitação'), (4,
gherkin.TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_TEXT,
'Como um programador'), (5, gherkin.TOKEN_NEWLINE, '\n'), (6,
gherkin.TOKEN_TEXT, 'Preciso de uma ferramenta de BDD'), (6,
gherkin.TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_LABEL, 'Contexto'),
(7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.TOKEN_TEXT,
'Dado que a variavel "X" contém o número 2'), (8, gherkin.
TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_LABEL, 'Cenário'), (9,
gherkin.TOKEN_TEXT, 'Lanche'), (9, gherkin.TOKEN_NEWLINE, '\n'), (
10, gherkin.TOKEN_TEXT, 'Dada uma maçã'), (10, gherkin.
TOKEN_NEWLINE, '\n'), (11, gherkin.TOKEN_TEXT, 'Quando mordida'), (
11, gherkin.TOKEN_NEWLINE, '\n'), (12, gherkin.TOKEN_TEXT,
'Então a fome passa'), (12, gherkin.TOKEN_NEWLINE, '\n'), (13,
gherkin.TOKEN_EOF, '')])
def test_lex_tables():
"""Lexer.run() Should be able to lex tables"""
lexer = gherkin.Lexer(""" Examples:
| column1 | column2 | """)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Examples'), (1, gherkin.
TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_TABLE_COLUMN, 'column1'), (
2, gherkin.TOKEN_TABLE_COLUMN, 'column2'), (2, gherkin.TOKEN_EOF, '')])
def test_lex_tables_full():
"""Lexer.run() Should be able to lex scenario outlines"""
lexer = gherkin.Lexer(
""" Feature: gherkin has steps with examples
Scenario Outline: Add two numbers
Given I have <input_1> and <input_2> the calculator
When I press "Sum"!
Then the result should be <output> on the screen
Examples:
| input_1 | input_2 | output |
| 20 | 30 | 50 |
| 0 | 40 | 40 |
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'gherkin has steps with examples'), (1, gherkin.
TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_LABEL, 'Scenario Outline'),
(2, gherkin.TOKEN_TEXT, 'Add two numbers'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_TEXT,
'Given I have <input_1> and <input_2> the calculator'), (3, gherkin
.TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_TEXT,
'When I press "Sum"!'), (4, gherkin.TOKEN_NEWLINE, '\n'), (5,
gherkin.TOKEN_TEXT,
'Then the result should be <output> on the screen'), (5, gherkin.
TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_LABEL, 'Examples'), (6,
gherkin.TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_TABLE_COLUMN,
'input_1'), (7, gherkin.TOKEN_TABLE_COLUMN, 'input_2'), (7, gherkin
.TOKEN_TABLE_COLUMN, 'output'), (7, gherkin.TOKEN_NEWLINE, '\n'), (
8, gherkin.TOKEN_TABLE_COLUMN, '20'), (8, gherkin.
TOKEN_TABLE_COLUMN, '30'), (8, gherkin.TOKEN_TABLE_COLUMN, '50'), (
8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_TABLE_COLUMN,
'0'), (9, gherkin.TOKEN_TABLE_COLUMN, '40'), (9, gherkin.
TOKEN_TABLE_COLUMN, '40'), (9, gherkin.TOKEN_NEWLINE, '\n'), (10,
gherkin.TOKEN_EOF, '')])
def test_lex_tables_within_steps():
"""Lexer.run() Should be able to lex example tables from steps"""
lexer = gherkin.Lexer(
""" Feature: Check models existence
Background:
Given I have a garden in the database:
| @name | area | raining |
| Secret Garden | 45 | false |
And I have gardens in the database:
| name | area | raining |
| Octopus' Garden | 120 | true |
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'Check models existence'), (1, gherkin.TOKEN_NEWLINE,
'\n'), (2, gherkin.TOKEN_LABEL, 'Background'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL,
'Given I have a garden in the database'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TABLE_COLUMN, '@name'), (4, gherkin.
TOKEN_TABLE_COLUMN, 'area'), (4, gherkin.TOKEN_TABLE_COLUMN,
'raining'), (4, gherkin.TOKEN_NEWLINE, '\n'), (5, gherkin.
TOKEN_TABLE_COLUMN, 'Secret Garden'), (5, gherkin.
TOKEN_TABLE_COLUMN, '45'), (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),
(5, gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_LABEL,
'And I have gardens in the database'), (6, gherkin.TOKEN_NEWLINE,
'\n'), (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.
TOKEN_TABLE_COLUMN, 'area'), (7, gherkin.TOKEN_TABLE_COLUMN,
'raining'), (7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.
TOKEN_TABLE_COLUMN, "Octopus' Garden"), (8, gherkin.
TOKEN_TABLE_COLUMN, '120'), (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),
(8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_EOF, '')])
def test_lex_multi_line_str():
"""Lexer.run() Should be able to find multi quoted strings after labels"""
lexer = gherkin.Lexer(
""" Given the following email template:
'''Here we go with a pretty
big block of text
surrounded by triple quoted strings
'''
And a cat picture
""\"Now notice we didn't use (:) above
""\"
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL,
'Given the following email template'), (1, gherkin.TOKEN_NEWLINE,
'\n'), (2, gherkin.TOKEN_QUOTES, "'''"), (2, gherkin.TOKEN_TEXT,
"""Here we go with a pretty
big block of text
surrounded by triple quoted strings
"""
), (5, gherkin.TOKEN_QUOTES, "'''"), (5, gherkin.TOKEN_NEWLINE,
'\n'), (6, gherkin.TOKEN_TEXT, 'And a cat picture'), (6, gherkin.
TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_QUOTES, '"""'), (7, gherkin
.TOKEN_TEXT, """Now notice we didn't use (:) above
"""), (8,
gherkin.TOKEN_QUOTES, '"""'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9,
gherkin.TOKEN_EOF, '')])
def test_lex_tags_empty():
"""Lexer.lex_tag() Should bail if we reach EOF"""
lexer = gherkin.Lexer('')
lexer.lex_tag()
lexer.tokens.should.be.empty
def test_lex_tags():
"""Lexer.run() Should be able to find tags"""
lexer = gherkin.Lexer(
""" @tagged-feature
Feature: Parse tags
@tag1 @tag2
Scenario: Test
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_TAG, 'tagged-feature'), (1,
gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_LABEL, 'Feature'),
(2, gherkin.TOKEN_TEXT, 'Parse tags'), (2, gherkin.TOKEN_NEWLINE,
'\n'), (3, gherkin.TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_TAG,
'tag1'), (4, gherkin.TOKEN_TAG, 'tag2'), (4, gherkin.TOKEN_NEWLINE,
'\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5, gherkin.TOKEN_TEXT,
'Test'), (5, gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_EOF, '')])
def test_parse_metadata_empty():
Parser([(1, gherkin.TOKEN_EOF, '')]).parse_metadata().should.be.none
Parser([None]).parse_metadata().should.be.none
def test_parse_metadata_incomplete():
parser = Parser([(1, gherkin.TOKEN_META_LABEL, 'language'), (1, gherkin
.TOKEN_EOF, '')])
parser.parse_metadata().should.be.none
def test_parse_metadata_syntax_error():
parser = Parser([(1, gherkin.TOKEN_META_LABEL, 'language'), (1, gherkin
.TOKEN_TEXT, 'pt-br')])
parser.parse_metadata.when.called.should.throw(SyntaxError,
"No value found for the meta-field `language'")
<|reserved_special_token_0|>
def test_parse_empty_title():
parser = Parser([(1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.
TOKEN_TEXT, 'more text after title')])
feature = parser.parse_title()
feature.should.be.none
def test_parse_title():
parser = Parser([(1, gherkin.TOKEN_TEXT, 'Scenario title'), (1, gherkin
.TOKEN_NEWLINE, '\n')])
feature = parser.parse_title()
feature.should.equal(Ast.Text(line=1, text='Scenario title'))
def test_parse_table():
parser = Parser([(1, gherkin.TOKEN_TABLE_COLUMN, 'name'), (1, gherkin.
TOKEN_TABLE_COLUMN, 'email'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'), (2, gherkin.
TOKEN_TABLE_COLUMN, '[email protected]'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'), (
3, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'), (3,
gherkin.TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_LABEL, 'Scenario'),
(4, gherkin.TOKEN_EOF, '')])
feature = parser.parse_table()
feature.should.equal(Ast.Table(line=1, fields=[['name', 'email'], [
'Lincoln', '[email protected]'], ['Gabriel',
'[email protected]']]))
def test_parse_background():
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Background'), (1, gherkin.
TOKEN_TEXT, 'title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin
.TOKEN_LABEL, 'Given two users in the database'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_TABLE_COLUMN, 'name'), (3,
gherkin.TOKEN_TABLE_COLUMN, 'email'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'), (4, gherkin.
TOKEN_TABLE_COLUMN, '[email protected]'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'), (
5, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'), (5,
gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_LABEL, 'Scenario')])
feature = parser.parse_background()
feature.should.equal(Ast.Background(line=1, title=Ast.Text(line=1, text
='title'), steps=[Ast.Step(line=2, title=Ast.Text(line=2, text=
'Given two users in the database'), table=Ast.Table(line=3, fields=
[['name', 'email'], ['Lincoln', '[email protected]'], ['Gabriel',
'[email protected]']]))]))
def test_parse_scenario():
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario'), (1, gherkin.
TOKEN_TEXT, 'Scenario title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'Given first step')])
feature = parser.parse_scenarios()
feature.should.equal([Ast.Scenario(line=1, title=Ast.Text(line=1, text=
'Scenario title'), steps=[Ast.Step(line=2, title=Ast.Text(line=2,
text='Given first step'))])])
def test_parse_scenario_with_description():
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario'), (1, gherkin.
TOKEN_TEXT, 'Scenario title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'Scenario description'), (2, gherkin.TOKEN_TEXT,
'More description'), (2, gherkin.TOKEN_NEWLINE, '\n'), (3, gherkin.
TOKEN_TEXT, 'Given first step')])
feature = parser.parse_scenarios()
feature.should.equal([Ast.Scenario(line=1, title=Ast.Text(line=1, text=
'Scenario title'), description=Ast.Text(line=2, text=
'Scenario description More description'), steps=[Ast.Step(line=3,
title=Ast.Text(line=3, text='Given first step'))])])
def test_parse_scenario_outline_with_examples():
""""""
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario Outline'), (1,
gherkin.TOKEN_TEXT, 'Plant a tree'), (1, gherkin.TOKEN_NEWLINE,
'\n'), (2, gherkin.TOKEN_TEXT, 'Given the <name> of a garden'), (2,
gherkin.TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_TEXT,
'When I plant a tree'), (3, gherkin.TOKEN_NEWLINE, '\n'), (4,
gherkin.TOKEN_TEXT, 'And wait for <num_days> days'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_TEXT,
'Then I see it growing'), (5, gherkin.TOKEN_NEWLINE, '\n'), (6,
gherkin.TOKEN_LABEL, 'Examples'), (6, gherkin.TOKEN_NEWLINE, '\n'),
(7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.
TOKEN_TABLE_COLUMN, 'num_days'), (7, gherkin.TOKEN_NEWLINE, '\n'),
(8, gherkin.TOKEN_TABLE_COLUMN, 'Secret'), (8, gherkin.
TOKEN_TABLE_COLUMN, '2'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9,
gherkin.TOKEN_TABLE_COLUMN, 'Octopus'), (9, gherkin.
TOKEN_TABLE_COLUMN, '5'), (9, gherkin.TOKEN_NEWLINE, '\n'), (10,
gherkin.TOKEN_EOF, '')])
scenarios = parser.parse_scenarios()
scenarios.should.equal([Ast.ScenarioOutline(line=1, title=Ast.Text(line
=1, text='Plant a tree'), steps=[Ast.Step(line=2, title=Ast.Text(
line=2, text='Given the <name> of a garden')), Ast.Step(line=3,
title=Ast.Text(line=3, text='When I plant a tree')), Ast.Step(line=
4, title=Ast.Text(line=4, text='And wait for <num_days> days')),
Ast.Step(line=5, title=Ast.Text(line=5, text=
'Then I see it growing'))], examples=Ast.Examples(line=6, table=Ast
.Table(line=7, fields=[['name', 'num_days'], ['Secret', '2'], [
'Octopus', '5']])))])
<|reserved_special_token_0|>
def test_parse_feature_two_backgrounds():
parser = gherkin.Parser(gherkin.Lexer(
"""
Feature: Feature title
feature description
Background: Some background
about the problem
Background: Some other background
will raise an exception
Scenario: Scenario title
Given first step
When second step
Then third step
"""
).run())
parser.parse_feature.when.called.should.throw(SyntaxError,
"`Background' should not be declared here, Scenario or Scenario Outline expected"
)
def test_parse_feature_background_wrong_place():
parser = gherkin.Parser(gherkin.Lexer(
"""
Feature: Feature title
feature description
Scenario: Scenario title
Given first step
When second step
Then third step
Background: Some background
about the problem
"""
).run())
parser.parse_feature.when.called.should.throw(SyntaxError,
"`Background' should not be declared here, Scenario or Scenario Outline expected"
)
def test_parse_feature():
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'Feature title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'feature description'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL, 'Background'), (3,
gherkin.TOKEN_TEXT, 'Some background'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TEXT, 'Given the problem'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5,
gherkin.TOKEN_TEXT, 'Scenario title'), (5, gherkin.TOKEN_NEWLINE,
'\n'), (6, gherkin.TOKEN_TEXT, 'Given first step'), (6, gherkin.
TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_LABEL, 'Scenario'), (7,
gherkin.TOKEN_TEXT, 'Another scenario'), (7, gherkin.TOKEN_NEWLINE,
'\n'), (8, gherkin.TOKEN_TEXT, 'Given this step'), (8, gherkin.
TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_TEXT,
'When we take another step'), (9, gherkin.TOKEN_NEWLINE, '\n'), (10,
gherkin.TOKEN_EOF, '')])
feature = parser.parse_feature()
feature.should.equal(Ast.Feature(line=1, title=Ast.Text(line=1, text=
'Feature title'), description=Ast.Text(line=2, text=
'feature description'), background=Ast.Background(line=3, title=Ast
.Text(line=3, text='Some background'), steps=[Ast.Step(line=4,
title=Ast.Text(line=4, text='Given the problem'))]), scenarios=[Ast
.Scenario(line=5, title=Ast.Text(line=5, text='Scenario title'),
steps=[Ast.Step(line=6, title=Ast.Text(line=6, text=
'Given first step'))]), Ast.Scenario(line=7, title=Ast.Text(line=7,
text='Another scenario'), steps=[Ast.Step(line=8, title=Ast.Text(
line=8, text='Given this step')), Ast.Step(line=9, title=Ast.Text(
line=9, text='When we take another step'))])]))
def test_parse_tables_within_steps():
"""Lexer.run() Should be able to parse example tables from steps"""
"""Feature: Check models existence
Background:
Given I have a garden in the database:
| @name | area | raining |
| Secret Garden | 45 | false |
And I have gardens in the database:
| name | area | raining |
| Octopus' Garden | 120 | true |
Scenario: Plant a tree
Given the <name> of a garden
When I plant a tree
And wait for <num_days> days
Then I see it growing
"""
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'Check models existence'), (1, gherkin.TOKEN_NEWLINE,
'\n'), (2, gherkin.TOKEN_LABEL, 'Background'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL,
'Given I have a garden in the database'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TABLE_COLUMN, '@name'), (4, gherkin.
TOKEN_TABLE_COLUMN, 'area'), (4, gherkin.TOKEN_TABLE_COLUMN,
'raining'), (4, gherkin.TOKEN_NEWLINE, '\n'), (5, gherkin.
TOKEN_TABLE_COLUMN, 'Secret Garden'), (5, gherkin.
TOKEN_TABLE_COLUMN, '45'), (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),
(5, gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_LABEL,
'And I have gardens in the database'), (6, gherkin.TOKEN_NEWLINE,
'\n'), (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.
TOKEN_TABLE_COLUMN, 'area'), (7, gherkin.TOKEN_TABLE_COLUMN,
'raining'), (7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.
TOKEN_TABLE_COLUMN, "Octopus' Garden"), (8, gherkin.
TOKEN_TABLE_COLUMN, '120'), (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),
(8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_LABEL,
'Scenario'), (9, gherkin.TOKEN_TEXT, 'Plant a tree'), (9, gherkin.
TOKEN_NEWLINE, '\n'), (10, gherkin.TOKEN_TEXT,
'Given the <name> of a garden'), (10, gherkin.TOKEN_NEWLINE, '\n'),
(11, gherkin.TOKEN_TEXT, 'When I plant a tree'), (11, gherkin.
TOKEN_NEWLINE, '\n'), (12, gherkin.TOKEN_TEXT,
'And wait for <num_days> days'), (12, gherkin.TOKEN_NEWLINE, '\n'),
(13, gherkin.TOKEN_TEXT, 'Then I see it growing'), (13, gherkin.
TOKEN_NEWLINE, '\n'), (14, gherkin.TOKEN_EOF, '')])
feature = parser.parse_feature()
feature.should.equal(Ast.Feature(line=1, title=Ast.Text(line=1, text=
'Check models existence'), background=Ast.Background(line=2, steps=
[Ast.Step(line=3, title=Ast.Text(line=3, text=
'Given I have a garden in the database'), table=Ast.Table(line=4,
fields=[['@name', 'area', 'raining'], ['Secret Garden', '45',
'false']])), Ast.Step(line=6, title=Ast.Text(line=6, text=
'And I have gardens in the database'), table=Ast.Table(line=7,
fields=[['name', 'area', 'raining'], ["Octopus' Garden", '120',
'true']]))]), scenarios=[Ast.Scenario(title=Ast.Text(line=9, text=
'Plant a tree'), line=9, steps=[Ast.Step(line=10, title=Ast.Text(
line=10, text='Given the <name> of a garden')), Ast.Step(line=11,
title=Ast.Text(line=11, text='When I plant a tree')), Ast.Step(line
=12, title=Ast.Text(line=12, text='And wait for <num_days> days')),
Ast.Step(line=13, title=Ast.Text(line=13, text=
'Then I see it growing'))])]))
def test_parse_quoted_strings_on_steps():
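    # Given a parser loaded with one step wrapped in ''' quotes and another
    # wrapped in """ quotes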
parser = Parser([(1, gherkin.TOKEN_LABEL,
'Given the following email template'), (1, gherkin.TOKEN_NEWLINE,
'\n'), (2, gherkin.TOKEN_QUOTES, "'''"), (2, gherkin.TOKEN_TEXT,
"""Here we go with a pretty
big block of text
surrounded by triple quoted strings
"""
), (5, gherkin.TOKEN_QUOTES, "'''"), (5, gherkin.TOKEN_NEWLINE,
'\n'), (6, gherkin.TOKEN_TEXT, 'And a cat picture'), (6, gherkin.
TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_QUOTES, '"""'), (7, gherkin
.TOKEN_TEXT, """Now notice we didn't use (:) above
"""), (8,
gherkin.TOKEN_QUOTES, '"""'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9,
gherkin.TOKEN_EOF, '')])
steps = parser.parse_steps()
steps.should.equal([Ast.Step(line=1, title=Ast.Text(line=1, text=
'Given the following email template'), text=Ast.Text(line=2, text=
"""Here we go with a pretty
big block of text
surrounded by triple quoted strings
"""
)), Ast.Step(line=6, title=Ast.Text(line=6, text=
'And a cat picture'), text=Ast.Text(line=7, text=
"""Now notice we didn't use (:) above
"""))])
def test_parse_text():
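    # Note: despite its name, this test exercises Parser.parse_tags() on a
    # stream of consecutive tag tokens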
parser = Parser([(1, gherkin.TOKEN_TAG, 'tag1'), (1, gherkin.TOKEN_TAG,
'tag2'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_TAG,
'tag3'), (2, gherkin.TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL,
'Feature')])
tags = parser.parse_tags()
tags.should.equal(['tag1', 'tag2', 'tag3'])
def test_parse_tags_on_scenario_outline_examples():
"""Parser should allow tags to be defined in examples"""
parser = Parser([(1, gherkin.TOKEN_TAG, 'tagged-feature'), (1, gherkin.
TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_LABEL, 'Feature'), (2,
gherkin.TOKEN_TEXT, 'Parse tags'), (2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_TAG, 'tag1'), (3, gherkin.TOKEN_TAG, 'tag2'), (3,
gherkin.TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_LABEL,
'Scenario Outline'), (4, gherkin.TOKEN_TEXT, 'Test'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_TAG, 'example-tag1'), (5,
gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_TAG, 'example-tag2'
), (6, gherkin.TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_LABEL,
'Examples'), (7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.
TOKEN_TABLE_COLUMN, 'Header'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9,
gherkin.TOKEN_EOF, '')])
feature = parser.parse_feature()
feature.should.equal(Ast.Feature(line=2, title=Ast.Text(line=2, text=
'Parse tags'), tags=['tagged-feature'], scenarios=[Ast.
ScenarioOutline(line=4, title=Ast.Text(line=4, text='Test'), tags=[
'tag1', 'tag2'], examples=Ast.Examples(line=7, tags=['example-tag1',
'example-tag2'], table=Ast.Table(line=8, fields=[['Header']])))]))
<mask token>
def test_ast_node_equal():
n1 = Ast.Node()
n2 = Ast.Node()
n1.name = 'Lincoln'
n2.color = 'green'
equal = n1 == n2
equal.should.be.false
<mask token>
def test_lex_test_eof():
"""lex_text() Should be able to find EOF"""
lexer = gherkin.Lexer('')
new_state = lexer.lex_text()
lexer.tokens.should.equal([(1, gherkin.TOKEN_EOF, '')])
new_state.should.be.none
def test_lex_text():
"""lex_text() Should be able to find text before EOF"""
lexer = gherkin.Lexer('some text')
new_state = lexer.lex_text()
lexer.tokens.should.equal([(1, gherkin.TOKEN_TEXT, 'some text'), (1,
gherkin.TOKEN_EOF, '')])
new_state.should.be.none
def test_lex_hash_with_text():
"""lex_text() Should stop lexing at # (we found a comment!)"""
lexer = gherkin.Lexer(' some text # random comment')
new_state = lexer.lex_text()
lexer.tokens.should.equal([(1, gherkin.TOKEN_TEXT, 'some text ')])
new_state.should.equal(lexer.lex_comment)
def test_lex_comment():
"""lex_comment() Should stop lexing at \\n"""
lexer = gherkin.Lexer(' random comment')
new_state = lexer.lex_comment()
lexer.tokens.should.equal([(1, gherkin.TOKEN_COMMENT, 'random comment')])
new_state.should.equal(lexer.lex_text)
def test_lex_comment_meta_label():
"""lex_comment() Should stop lexing at : (we found a label)"""
lexer = gherkin.Lexer(' metadata: test')
new_state = lexer.lex_comment()
lexer.tokens.should.equal([(1, gherkin.TOKEN_META_LABEL, 'metadata')])
new_state.should.equal(lexer.lex_comment_metadata_value)
def test_lex_comment_metadata_value():
"""lex_comment_metadata_value() Should stop lexing at
"""
lexer = gherkin.Lexer(' test value\nblah')
new_state = lexer.lex_comment_metadata_value()
lexer.tokens.should.equal([(1, gherkin.TOKEN_META_VALUE, 'test value')])
new_state.should.equal(lexer.lex_text)
def test_lex_comment_no_newline():
lexer = gherkin.Lexer(' test comment')
new_state = lexer.lex_comment_metadata_value()
lexer.tokens.should.equal([(1, gherkin.TOKEN_META_VALUE, 'test comment')])
new_state.should.equal(lexer.lex_text)
def test_lex_comment_until_newline():
"""Lexer.lex_comment() Should parse comments until the newline character"""
lexer = gherkin.Lexer('# one line\n# another line')
tokens = lexer.run()
lexer.tokens.should.equal([(1, gherkin.TOKEN_COMMENT, 'one line'), (1,
gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_COMMENT,
'another line'), (2, gherkin.TOKEN_EOF, '')])
def test_lex_comment_full():
"""Lexer.run() Should be able to process metadata in comments"""
lexer = gherkin.Lexer('some text # metadata-field: blah-value\ntext')
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_TEXT, 'some text '), (1, gherkin
.TOKEN_META_LABEL, 'metadata-field'), (1, gherkin.TOKEN_META_VALUE,
'blah-value'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.
TOKEN_TEXT, 'text'), (2, gherkin.TOKEN_EOF, '')])
def test_lex_text_with_label():
"""Lexer.run() Should be able to parse a label with some text"""
lexer = gherkin.Lexer(
'Feature: A cool feature\n some more text\n even more text')
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'A cool feature'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'some more text'), (2, gherkin.TOKEN_NEWLINE,
'\n'), (3, gherkin.TOKEN_TEXT, 'even more text'), (3, gherkin.
TOKEN_EOF, '')])
def test_lex_text_with_labels():
"""Lexer.run() Should be able to tokenize a feature with a scenario"""
lexer = gherkin.Lexer(
"""
Feature: Some descriptive text
In order to parse a Gherkin file
As a parser
I want to be able to parse scenarios
Even more text
Scenario: The user wants to describe a feature
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL, 'Feature'), (3,
gherkin.TOKEN_TEXT, 'Some descriptive text'), (3, gherkin.
TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_TEXT,
'In order to parse a Gherkin file'), (4, gherkin.TOKEN_NEWLINE,
'\n'), (5, gherkin.TOKEN_TEXT, 'As a parser'), (5, gherkin.
TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_TEXT,
'I want to be able to parse scenarios'), (6, gherkin.TOKEN_NEWLINE,
'\n'), (7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.TOKEN_TEXT,
'Even more text'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.
TOKEN_NEWLINE, '\n'), (10, gherkin.TOKEN_LABEL, 'Scenario'), (10,
gherkin.TOKEN_TEXT, 'The user wants to describe a feature'), (10,
gherkin.TOKEN_NEWLINE, '\n'), (11, gherkin.TOKEN_EOF, '')])
def test_lex_text_with_steps():
"""Lexer.run() Should be able to tokenize steps"""
lexer = gherkin.Lexer(
"""Feature: Feature title
feature description
Background: Some background
about the problem
Scenario: Scenario title
Given first step
When second step
Then third step
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'Feature title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'feature description'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL, 'Background'), (3,
gherkin.TOKEN_TEXT, 'Some background'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TEXT, 'about the problem'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5,
gherkin.TOKEN_TEXT, 'Scenario title'), (5, gherkin.TOKEN_NEWLINE,
'\n'), (6, gherkin.TOKEN_TEXT, 'Given first step'), (6, gherkin.
TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_TEXT, 'When second step'),
(7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.TOKEN_TEXT,
'Then third step'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.
TOKEN_EOF, '')])
def test_lex_load_languages():
"""Lexer.run() Should be able to parse different languages"""
lexer = gherkin.Lexer(
"""# language: pt-br
Funcionalidade: Interpretador para gherkin
Para escrever testes de aceitação
Como um programador
Preciso de uma ferramenta de BDD
Contexto:
Dado que a variavel "X" contém o número 2
Cenário: Lanche
Dada uma maçã
Quando mordida
Então a fome passa
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_META_LABEL, 'language'), (1,
gherkin.TOKEN_META_VALUE, 'pt-br'), (1, gherkin.TOKEN_NEWLINE, '\n'
), (2, gherkin.TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL,
'Funcionalidade'), (3, gherkin.TOKEN_TEXT,
'Interpretador para gherkin'), (3, gherkin.TOKEN_NEWLINE, '\n'), (4,
gherkin.TOKEN_TEXT, 'Para escrever testes de aceitação'), (4,
gherkin.TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_TEXT,
'Como um programador'), (5, gherkin.TOKEN_NEWLINE, '\n'), (6,
gherkin.TOKEN_TEXT, 'Preciso de uma ferramenta de BDD'), (6,
gherkin.TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_LABEL, 'Contexto'),
(7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.TOKEN_TEXT,
'Dado que a variavel "X" contém o número 2'), (8, gherkin.
TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_LABEL, 'Cenário'), (9,
gherkin.TOKEN_TEXT, 'Lanche'), (9, gherkin.TOKEN_NEWLINE, '\n'), (
10, gherkin.TOKEN_TEXT, 'Dada uma maçã'), (10, gherkin.
TOKEN_NEWLINE, '\n'), (11, gherkin.TOKEN_TEXT, 'Quando mordida'), (
11, gherkin.TOKEN_NEWLINE, '\n'), (12, gherkin.TOKEN_TEXT,
'Então a fome passa'), (12, gherkin.TOKEN_NEWLINE, '\n'), (13,
gherkin.TOKEN_EOF, '')])
def test_lex_tables():
"""Lexer.run() Should be able to lex tables"""
lexer = gherkin.Lexer(""" Examples:
| column1 | column2 | """)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Examples'), (1, gherkin.
TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_TABLE_COLUMN, 'column1'), (
2, gherkin.TOKEN_TABLE_COLUMN, 'column2'), (2, gherkin.TOKEN_EOF, '')])
def test_lex_tables_full():
"""Lexer.run() Should be able to lex scenario outlines"""
lexer = gherkin.Lexer(
""" Feature: gherkin has steps with examples
Scenario Outline: Add two numbers
Given I have <input_1> and <input_2> the calculator
When I press "Sum"!
Then the result should be <output> on the screen
Examples:
| input_1 | input_2 | output |
| 20 | 30 | 50 |
| 0 | 40 | 40 |
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'gherkin has steps with examples'), (1, gherkin.
TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_LABEL, 'Scenario Outline'),
(2, gherkin.TOKEN_TEXT, 'Add two numbers'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_TEXT,
'Given I have <input_1> and <input_2> the calculator'), (3, gherkin
.TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_TEXT,
'When I press "Sum"!'), (4, gherkin.TOKEN_NEWLINE, '\n'), (5,
gherkin.TOKEN_TEXT,
'Then the result should be <output> on the screen'), (5, gherkin.
TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_LABEL, 'Examples'), (6,
gherkin.TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_TABLE_COLUMN,
'input_1'), (7, gherkin.TOKEN_TABLE_COLUMN, 'input_2'), (7, gherkin
.TOKEN_TABLE_COLUMN, 'output'), (7, gherkin.TOKEN_NEWLINE, '\n'), (
8, gherkin.TOKEN_TABLE_COLUMN, '20'), (8, gherkin.
TOKEN_TABLE_COLUMN, '30'), (8, gherkin.TOKEN_TABLE_COLUMN, '50'), (
8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_TABLE_COLUMN,
'0'), (9, gherkin.TOKEN_TABLE_COLUMN, '40'), (9, gherkin.
TOKEN_TABLE_COLUMN, '40'), (9, gherkin.TOKEN_NEWLINE, '\n'), (10,
gherkin.TOKEN_EOF, '')])
def test_lex_tables_within_steps():
"""Lexer.run() Should be able to lex example tables from steps"""
lexer = gherkin.Lexer(
""" Feature: Check models existence
Background:
Given I have a garden in the database:
| @name | area | raining |
| Secret Garden | 45 | false |
And I have gardens in the database:
| name | area | raining |
| Octopus' Garden | 120 | true |
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'Check models existence'), (1, gherkin.TOKEN_NEWLINE,
'\n'), (2, gherkin.TOKEN_LABEL, 'Background'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL,
'Given I have a garden in the database'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TABLE_COLUMN, '@name'), (4, gherkin.
TOKEN_TABLE_COLUMN, 'area'), (4, gherkin.TOKEN_TABLE_COLUMN,
'raining'), (4, gherkin.TOKEN_NEWLINE, '\n'), (5, gherkin.
TOKEN_TABLE_COLUMN, 'Secret Garden'), (5, gherkin.
TOKEN_TABLE_COLUMN, '45'), (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),
(5, gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_LABEL,
'And I have gardens in the database'), (6, gherkin.TOKEN_NEWLINE,
'\n'), (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.
TOKEN_TABLE_COLUMN, 'area'), (7, gherkin.TOKEN_TABLE_COLUMN,
'raining'), (7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.
TOKEN_TABLE_COLUMN, "Octopus' Garden"), (8, gherkin.
TOKEN_TABLE_COLUMN, '120'), (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),
(8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_EOF, '')])
def test_lex_multi_line_str():
"""Lexer.run() Should be able to find multi quoted strings after labels"""
lexer = gherkin.Lexer(
""" Given the following email template:
'''Here we go with a pretty
big block of text
surrounded by triple quoted strings
'''
And a cat picture
""\"Now notice we didn't use (:) above
""\"
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_LABEL,
'Given the following email template'), (1, gherkin.TOKEN_NEWLINE,
'\n'), (2, gherkin.TOKEN_QUOTES, "'''"), (2, gherkin.TOKEN_TEXT,
"""Here we go with a pretty
big block of text
surrounded by triple quoted strings
"""
), (5, gherkin.TOKEN_QUOTES, "'''"), (5, gherkin.TOKEN_NEWLINE,
'\n'), (6, gherkin.TOKEN_TEXT, 'And a cat picture'), (6, gherkin.
TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_QUOTES, '"""'), (7, gherkin
.TOKEN_TEXT, """Now notice we didn't use (:) above
"""), (8,
gherkin.TOKEN_QUOTES, '"""'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9,
gherkin.TOKEN_EOF, '')])
def test_lex_tags_empty():
"""Lexer.lex_tag() Should bail if we reach EOF"""
lexer = gherkin.Lexer('')
lexer.lex_tag()
lexer.tokens.should.be.empty
def test_lex_tags():
"""Lexer.run() Should be able to find tags"""
lexer = gherkin.Lexer(
""" @tagged-feature
Feature: Parse tags
@tag1 @tag2
Scenario: Test
"""
)
tokens = lexer.run()
tokens.should.equal([(1, gherkin.TOKEN_TAG, 'tagged-feature'), (1,
gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_LABEL, 'Feature'),
(2, gherkin.TOKEN_TEXT, 'Parse tags'), (2, gherkin.TOKEN_NEWLINE,
'\n'), (3, gherkin.TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_TAG,
'tag1'), (4, gherkin.TOKEN_TAG, 'tag2'), (4, gherkin.TOKEN_NEWLINE,
'\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5, gherkin.TOKEN_TEXT,
'Test'), (5, gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_EOF, '')])
def test_parse_metadata_empty():
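    # Both an EOF-only token stream and a stream whose head is None should
    # yield no metadata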
Parser([(1, gherkin.TOKEN_EOF, '')]).parse_metadata().should.be.none
Parser([None]).parse_metadata().should.be.none
def test_parse_metadata_incomplete():
parser = Parser([(1, gherkin.TOKEN_META_LABEL, 'language'), (1, gherkin
.TOKEN_EOF, '')])
parser.parse_metadata().should.be.none
def test_parse_metadata_syntax_error():
parser = Parser([(1, gherkin.TOKEN_META_LABEL, 'language'), (1, gherkin
.TOKEN_TEXT, 'pt-br')])
parser.parse_metadata.when.called.should.throw(SyntaxError,
"No value found for the meta-field `language'")
def test_parse_metadata():
parser = Parser([(1, gherkin.TOKEN_META_LABEL, 'language'), (1, gherkin
.TOKEN_META_VALUE, 'pt-br')])
metadata = parser.parse_metadata()
metadata.should.equal(Ast.Metadata(line=1, key='language', value='pt-br'))
def test_parse_empty_title():
parser = Parser([(1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.
TOKEN_TEXT, 'more text after title')])
feature = parser.parse_title()
feature.should.be.none
def test_parse_title():
parser = Parser([(1, gherkin.TOKEN_TEXT, 'Scenario title'), (1, gherkin
.TOKEN_NEWLINE, '\n')])
feature = parser.parse_title()
feature.should.equal(Ast.Text(line=1, text='Scenario title'))
def test_parse_table():
parser = Parser([(1, gherkin.TOKEN_TABLE_COLUMN, 'name'), (1, gherkin.
TOKEN_TABLE_COLUMN, 'email'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'), (2, gherkin.
TOKEN_TABLE_COLUMN, '[email protected]'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'), (
3, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'), (3,
gherkin.TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_LABEL, 'Scenario'),
(4, gherkin.TOKEN_EOF, '')])
feature = parser.parse_table()
feature.should.equal(Ast.Table(line=1, fields=[['name', 'email'], [
'Lincoln', '[email protected]'], ['Gabriel',
'[email protected]']]))
def test_parse_background():
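    # Given a parser loaded with the following gherkin document:
    #
    # Background: title
    #   Given two users in the database:
    #       | name | email |
    #       | Lincoln | [email protected] |
    #       | Gabriel | [email protected] |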
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Background'), (1, gherkin.
TOKEN_TEXT, 'title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin
.TOKEN_LABEL, 'Given two users in the database'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_TABLE_COLUMN, 'name'), (3,
gherkin.TOKEN_TABLE_COLUMN, 'email'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'), (4, gherkin.
TOKEN_TABLE_COLUMN, '[email protected]'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'), (
5, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'), (5,
gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_LABEL, 'Scenario')])
feature = parser.parse_background()
feature.should.equal(Ast.Background(line=1, title=Ast.Text(line=1, text
='title'), steps=[Ast.Step(line=2, title=Ast.Text(line=2, text=
'Given two users in the database'), table=Ast.Table(line=3, fields=
[['name', 'email'], ['Lincoln', '[email protected]'], ['Gabriel',
'[email protected]']]))]))
def teste_parse_scenario():
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario'), (1, gherkin.
TOKEN_TEXT, 'Scenario title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'Given first step')])
feature = parser.parse_scenarios()
feature.should.equal([Ast.Scenario(line=1, title=Ast.Text(line=1, text=
'Scenario title'), steps=[Ast.Step(line=2, title=Ast.Text(line=2,
text='Given first step'))])])
def teste_parse_scenario_with_description():
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario'), (1, gherkin.
TOKEN_TEXT, 'Scenario title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'Scenario description'), (2, gherkin.TOKEN_TEXT,
'More description'), (2, gherkin.TOKEN_NEWLINE, '\n'), (3, gherkin.
TOKEN_TEXT, 'Given first step')])
feature = parser.parse_scenarios()
feature.should.equal([Ast.Scenario(line=1, title=Ast.Text(line=1, text=
'Scenario title'), description=Ast.Text(line=2, text=
'Scenario description More description'), steps=[Ast.Step(line=3,
title=Ast.Text(line=3, text='Given first step'))])])
def test_parse_scenario_outline_with_examples():
""""""
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario Outline'), (1,
gherkin.TOKEN_TEXT, 'Plant a tree'), (1, gherkin.TOKEN_NEWLINE,
'\n'), (2, gherkin.TOKEN_TEXT, 'Given the <name> of a garden'), (2,
gherkin.TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_TEXT,
'When I plant a tree'), (3, gherkin.TOKEN_NEWLINE, '\n'), (4,
gherkin.TOKEN_TEXT, 'And wait for <num_days> days'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_TEXT,
'Then I see it growing'), (5, gherkin.TOKEN_NEWLINE, '\n'), (6,
gherkin.TOKEN_LABEL, 'Examples'), (6, gherkin.TOKEN_NEWLINE, '\n'),
(7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.
TOKEN_TABLE_COLUMN, 'num_days'), (7, gherkin.TOKEN_NEWLINE, '\n'),
(8, gherkin.TOKEN_TABLE_COLUMN, 'Secret'), (8, gherkin.
TOKEN_TABLE_COLUMN, '2'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9,
gherkin.TOKEN_TABLE_COLUMN, 'Octopus'), (9, gherkin.
TOKEN_TABLE_COLUMN, '5'), (9, gherkin.TOKEN_NEWLINE, '\n'), (10,
gherkin.TOKEN_EOF, '')])
scenarios = parser.parse_scenarios()
scenarios.should.equal([Ast.ScenarioOutline(line=1, title=Ast.Text(line
=1, text='Plant a tree'), steps=[Ast.Step(line=2, title=Ast.Text(
line=2, text='Given the <name> of a garden')), Ast.Step(line=3,
title=Ast.Text(line=3, text='When I plant a tree')), Ast.Step(line=
4, title=Ast.Text(line=4, text='And wait for <num_days> days')),
Ast.Step(line=5, title=Ast.Text(line=5, text=
'Then I see it growing'))], examples=Ast.Examples(line=6, table=Ast
.Table(line=7, fields=[['name', 'num_days'], ['Secret', '2'], [
'Octopus', '5']])))])
def test_parse_not_starting_with_feature():
parser = gherkin.Parser(gherkin.Lexer(
"""
Scenario: Scenario title
Given first step
When second step
Then third step
"""
).run())
parser.parse_feature.when.called.should.throw(SyntaxError,
"Feature expected in the beginning of the file, found `Scenario' though."
)
def test_parse_feature_two_backgrounds():
parser = gherkin.Parser(gherkin.Lexer(
"""
Feature: Feature title
feature description
Background: Some background
about the problem
Background: Some other background
will raise an exception
Scenario: Scenario title
Given first step
When second step
Then third step
"""
).run())
parser.parse_feature.when.called.should.throw(SyntaxError,
"`Background' should not be declared here, Scenario or Scenario Outline expected"
)
def test_parse_feature_background_wrong_place():
parser = gherkin.Parser(gherkin.Lexer(
"""
Feature: Feature title
feature description
Scenario: Scenario title
Given first step
When second step
Then third step
Background: Some background
about the problem
"""
).run())
parser.parse_feature.when.called.should.throw(SyntaxError,
"`Background' should not be declared here, Scenario or Scenario Outline expected"
)
def test_parse_feature():
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'Feature title'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2,
gherkin.TOKEN_TEXT, 'feature description'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL, 'Background'), (3,
gherkin.TOKEN_TEXT, 'Some background'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TEXT, 'Given the problem'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5,
gherkin.TOKEN_TEXT, 'Scenario title'), (5, gherkin.TOKEN_NEWLINE,
'\n'), (6, gherkin.TOKEN_TEXT, 'Given first step'), (6, gherkin.
TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_LABEL, 'Scenario'), (7,
gherkin.TOKEN_TEXT, 'Another scenario'), (7, gherkin.TOKEN_NEWLINE,
'\n'), (8, gherkin.TOKEN_TEXT, 'Given this step'), (8, gherkin.
TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_TEXT,
'When we take another step'), (9, gherkin.TOKEN_NEWLINE, '\n'), (10,
gherkin.TOKEN_EOF, '')])
feature = parser.parse_feature()
feature.should.equal(Ast.Feature(line=1, title=Ast.Text(line=1, text=
'Feature title'), description=Ast.Text(line=2, text=
'feature description'), background=Ast.Background(line=3, title=Ast
.Text(line=3, text='Some background'), steps=[Ast.Step(line=4,
title=Ast.Text(line=4, text='Given the problem'))]), scenarios=[Ast
.Scenario(line=5, title=Ast.Text(line=5, text='Scenario title'),
steps=[Ast.Step(line=6, title=Ast.Text(line=6, text=
'Given first step'))]), Ast.Scenario(line=7, title=Ast.Text(line=7,
text='Another scenario'), steps=[Ast.Step(line=8, title=Ast.Text(
line=8, text='Given this step')), Ast.Step(line=9, title=Ast.Text(
line=9, text='When we take another step'))])]))
def test_parse_tables_within_steps():
"""Lexer.run() Should be able to parse example tables from steps"""
"""Feature: Check models existence
Background:
Given I have a garden in the database:
| @name | area | raining |
| Secret Garden | 45 | false |
And I have gardens in the database:
| name | area | raining |
| Octopus' Garden | 120 | true |
Scenario: Plant a tree
Given the <name> of a garden
When I plant a tree
And wait for <num_days> days
Then I see it growing
"""
parser = Parser([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.
TOKEN_TEXT, 'Check models existence'), (1, gherkin.TOKEN_NEWLINE,
'\n'), (2, gherkin.TOKEN_LABEL, 'Background'), (2, gherkin.
TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL,
'Given I have a garden in the database'), (3, gherkin.TOKEN_NEWLINE,
'\n'), (4, gherkin.TOKEN_TABLE_COLUMN, '@name'), (4, gherkin.
TOKEN_TABLE_COLUMN, 'area'), (4, gherkin.TOKEN_TABLE_COLUMN,
'raining'), (4, gherkin.TOKEN_NEWLINE, '\n'), (5, gherkin.
TOKEN_TABLE_COLUMN, 'Secret Garden'), (5, gherkin.
TOKEN_TABLE_COLUMN, '45'), (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),
(5, gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_LABEL,
'And I have gardens in the database'), (6, gherkin.TOKEN_NEWLINE,
'\n'), (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.
TOKEN_TABLE_COLUMN, 'area'), (7, gherkin.TOKEN_TABLE_COLUMN,
'raining'), (7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.
TOKEN_TABLE_COLUMN, "Octopus' Garden"), (8, gherkin.
TOKEN_TABLE_COLUMN, '120'), (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),
(8, gherkin.TOKEN_NEWLINE, '\n'), (9, gherkin.TOKEN_LABEL,
'Scenario'), (9, gherkin.TOKEN_TEXT, 'Plant a tree'), (9, gherkin.
TOKEN_NEWLINE, '\n'), (10, gherkin.TOKEN_TEXT,
'Given the <name> of a garden'), (10, gherkin.TOKEN_NEWLINE, '\n'),
(11, gherkin.TOKEN_TEXT, 'When I plant a tree'), (11, gherkin.
TOKEN_NEWLINE, '\n'), (12, gherkin.TOKEN_TEXT,
'And wait for <num_days> days'), (12, gherkin.TOKEN_NEWLINE, '\n'),
(13, gherkin.TOKEN_TEXT, 'Then I see it growing'), (13, gherkin.
TOKEN_NEWLINE, '\n'), (14, gherkin.TOKEN_EOF, '')])
feature = parser.parse_feature()
feature.should.equal(Ast.Feature(line=1, title=Ast.Text(line=1, text=
'Check models existence'), background=Ast.Background(line=2, steps=
[Ast.Step(line=3, title=Ast.Text(line=3, text=
'Given I have a garden in the database'), table=Ast.Table(line=4,
fields=[['@name', 'area', 'raining'], ['Secret Garden', '45',
'false']])), Ast.Step(line=6, title=Ast.Text(line=6, text=
'And I have gardens in the database'), table=Ast.Table(line=7,
fields=[['name', 'area', 'raining'], ["Octopus' Garden", '120',
'true']]))]), scenarios=[Ast.Scenario(title=Ast.Text(line=9, text=
'Plant a tree'), line=9, steps=[Ast.Step(line=10, title=Ast.Text(
line=10, text='Given the <name> of a garden')), Ast.Step(line=11,
title=Ast.Text(line=11, text='When I plant a tree')), Ast.Step(line
=12, title=Ast.Text(line=12, text='And wait for <num_days> days')),
Ast.Step(line=13, title=Ast.Text(line=13, text=
'Then I see it growing'))])]))
def test_parse_quoted_strings_on_steps():
parser = Parser([(1, gherkin.TOKEN_LABEL,
'Given the following email template'), (1, gherkin.TOKEN_NEWLINE,
'\n'), (2, gherkin.TOKEN_QUOTES, "'''"), (2, gherkin.TOKEN_TEXT,
"""Here we go with a pretty
big block of text
surrounded by triple quoted strings
"""
), (5, gherkin.TOKEN_QUOTES, "'''"), (5, gherkin.TOKEN_NEWLINE,
'\n'), (6, gherkin.TOKEN_TEXT, 'And a cat picture'), (6, gherkin.
TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_QUOTES, '"""'), (7, gherkin
.TOKEN_TEXT, """Now notice we didn't use (:) above
"""), (8,
gherkin.TOKEN_QUOTES, '"""'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9,
gherkin.TOKEN_EOF, '')])
steps = parser.parse_steps()
steps.should.equal([Ast.Step(line=1, title=Ast.Text(line=1, text=
'Given the following email template'), text=Ast.Text(line=2, text=
"""Here we go with a pretty
big block of text
surrounded by triple quoted strings
"""
)), Ast.Step(line=6, title=Ast.Text(line=6, text=
'And a cat picture'), text=Ast.Text(line=7, text=
"""Now notice we didn't use (:) above
"""))])
def test_parse_text():
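    # Note: despite its name, this test exercises Parser.parse_tags() on a
    # stream of consecutive tag tokens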
parser = Parser([(1, gherkin.TOKEN_TAG, 'tag1'), (1, gherkin.TOKEN_TAG,
'tag2'), (1, gherkin.TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_TAG,
'tag3'), (2, gherkin.TOKEN_NEWLINE, '\n'), (3, gherkin.TOKEN_LABEL,
'Feature')])
tags = parser.parse_tags()
tags.should.equal(['tag1', 'tag2', 'tag3'])
def test_parse_tags_on_scenario_outline_examples():
"""Parser should allow tags to be defined in examples"""
parser = Parser([(1, gherkin.TOKEN_TAG, 'tagged-feature'), (1, gherkin.
TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_LABEL, 'Feature'), (2,
gherkin.TOKEN_TEXT, 'Parse tags'), (2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_TAG, 'tag1'), (3, gherkin.TOKEN_TAG, 'tag2'), (3,
gherkin.TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_LABEL,
'Scenario Outline'), (4, gherkin.TOKEN_TEXT, 'Test'), (4, gherkin.
TOKEN_NEWLINE, '\n'), (5, gherkin.TOKEN_TAG, 'example-tag1'), (5,
gherkin.TOKEN_NEWLINE, '\n'), (6, gherkin.TOKEN_TAG, 'example-tag2'
), (6, gherkin.TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_LABEL,
'Examples'), (7, gherkin.TOKEN_NEWLINE, '\n'), (8, gherkin.
TOKEN_TABLE_COLUMN, 'Header'), (8, gherkin.TOKEN_NEWLINE, '\n'), (9,
gherkin.TOKEN_EOF, '')])
feature = parser.parse_feature()
feature.should.equal(Ast.Feature(line=2, title=Ast.Text(line=2, text=
'Parse tags'), tags=['tagged-feature'], scenarios=[Ast.
ScenarioOutline(line=4, title=Ast.Text(line=4, text='Test'), tags=[
'tag1', 'tag2'], examples=Ast.Examples(line=7, tags=['example-tag1',
'example-tag2'], table=Ast.Table(line=8, fields=[['Header']])))]))
def test_parse_tags_on_feature_and_scenario():
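    # Given a parser loaded with a tagged feature whose scenario also
    # carries tags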
parser = Parser([(1, gherkin.TOKEN_TAG, 'tagged-feature'), (1, gherkin.
TOKEN_NEWLINE, '\n'), (2, gherkin.TOKEN_LABEL, 'Feature'), (2,
gherkin.TOKEN_TEXT, 'Parse tags'), (2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_NEWLINE, '\n'), (4, gherkin.TOKEN_TAG, 'tag1'), (
4, gherkin.TOKEN_TAG, 'tag2'), (4, gherkin.TOKEN_NEWLINE, '\n'), (5,
gherkin.TOKEN_LABEL, 'Scenario'), (5, gherkin.TOKEN_TEXT, 'Test'),
(6, gherkin.TOKEN_NEWLINE, '\n'), (7, gherkin.TOKEN_EOF, '')])
feature = parser.parse_feature()
feature.should.equal(Ast.Feature(line=2, title=Ast.Text(line=2, text=
'Parse tags'), tags=['tagged-feature'], scenarios=[Ast.Scenario(
line=5, title=Ast.Text(line=5, text='Test'), tags=['tag1', 'tag2'])]))
def test_ast_node_equal():
n1 = Ast.Node()
n2 = Ast.Node()
n1.name = 'Lincoln'
n2.color = 'green'
equal = n1 == n2
equal.should.be.false
# -*- coding: utf-8; -*-
import gherkin
from gherkin import Lexer, Parser, Ast
def test_lex_test_eof():
"lex_text() Should be able to find EOF"
# Given a lexer that takes '' as the input string
lexer = gherkin.Lexer('')
# When we try to lex any text from ''
new_state = lexer.lex_text()
# Then we see we've got to EOF and that new state is nil
lexer.tokens.should.equal([(1, gherkin.TOKEN_EOF, '')])
new_state.should.be.none
def test_lex_text():
"lex_text() Should be able to find text before EOF"
# Given a lexer that takes some text as input string
lexer = gherkin.Lexer('some text')
# When we lex it
new_state = lexer.lex_text()
# Then we see we found both the text and the EOF token
lexer.tokens.should.equal([
(1, gherkin.TOKEN_TEXT, 'some text'),
(1, gherkin.TOKEN_EOF, '')
])
# And the new state is nil
new_state.should.be.none
def test_lex_hash_with_text():
"lex_text() Should stop lexing at # (we found a comment!)"
# Given a lexer with some text and some comment
lexer = gherkin.Lexer(' some text # random comment')
# When the input is lexed through the text lexer
new_state = lexer.lex_text()
# Then we see the following token on the output list
lexer.tokens.should.equal([
(1, gherkin.TOKEN_TEXT, 'some text '),
])
# And that the next state will lex comments
new_state.should.equal(lexer.lex_comment)
def test_lex_comment():
"lex_comment() Should stop lexing at \\n"
# Given a lexer loaded with some comments
lexer = gherkin.Lexer(' random comment')
# When We lex the input text
new_state = lexer.lex_comment()
# Then we see the comment above was captured
lexer.tokens.should.equal([
(1, gherkin.TOKEN_COMMENT, 'random comment'),
])
# And that new state is lex_text()
new_state.should.equal(lexer.lex_text)
def test_lex_comment_meta_label():
"lex_comment() Should stop lexing at : (we found a label)"
# Given a lexer loaded with a comment that contains a label
lexer = gherkin.Lexer(' metadata: test')
# When we lex the comment
new_state = lexer.lex_comment()
# Then we see that a label was found
lexer.tokens.should.equal([
(1, gherkin.TOKEN_META_LABEL, 'metadata'),
])
# And that new state is going to read the value of the variable we
# just found
new_state.should.equal(lexer.lex_comment_metadata_value)
def test_lex_comment_metadata_value():
"lex_comment_metadata_value() Should stop lexing at \n"
# Given a lexer loaded with the value of a label and a new line
# with more text
lexer = gherkin.Lexer(' test value\nblah')
# When we lex the input string
new_state = lexer.lex_comment_metadata_value()
# Then we see that only the value present is the one before the
# \n, everything else will be lexed by lex_text
lexer.tokens.should.equal([
(1, gherkin.TOKEN_META_VALUE, 'test value'),
])
# And we also see that the next
new_state.should.equal(lexer.lex_text)
def test_lex_comment_no_newline():
# Given a lexer loaded with a comment without the newline marker
lexer = gherkin.Lexer(' test comment')
# When we lex the input string
new_state = lexer.lex_comment_metadata_value()
# Then we see the whole line was captured
lexer.tokens.should.equal([
(1, gherkin.TOKEN_META_VALUE, 'test comment'),
])
# And we also see that the next
new_state.should.equal(lexer.lex_text)
def test_lex_comment_until_newline():
"Lexer.lex_comment() Should parse comments until the newline character"
# Given a lexer loaded with comments containing a metadata field
lexer = gherkin.Lexer('# one line\n# another line')
# When I run the lexer
tokens = lexer.run()
# Then we see both lines were captured
lexer.tokens.should.equal([
(1, gherkin.TOKEN_COMMENT, 'one line'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_COMMENT, 'another line'),
(2, gherkin.TOKEN_EOF, ''),
])
def test_lex_comment_full():
"Lexer.run() Should be able to process metadata in comments"
# Given a lexer loaded with comments containing a metadata field
lexer = gherkin.Lexer('some text # metadata-field: blah-value\ntext')
# When I run the lexer
tokens = lexer.run()
# Then I see the tokens collected match some text, a field, more
# text and EOF
tokens.should.equal([
(1, gherkin.TOKEN_TEXT, 'some text '),
(1, gherkin.TOKEN_META_LABEL, 'metadata-field'),
(1, gherkin.TOKEN_META_VALUE, 'blah-value'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_TEXT, 'text'),
(2, gherkin.TOKEN_EOF, '')
])
def test_lex_text_with_label():
"Lexer.run() Should be able to parse a label with some text"
# Given a lexer loaded with a feature
lexer = gherkin.Lexer(
'Feature: A cool feature\n some more text\n even more text')
# When we run the lexer
tokens = lexer.run()
# Then we see the token list matches the label, text, text EOF
# sequence
tokens.should.equal([
(1, gherkin.TOKEN_LABEL, 'Feature'),
(1, gherkin.TOKEN_TEXT, 'A cool feature'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_TEXT, 'some more text'),
(2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_TEXT, 'even more text'),
(3, gherkin.TOKEN_EOF, '')
])
def test_lex_text_with_labels():
"Lexer.run() Should be able to tokenize a feature with a scenario"
# Given a lexer with a more complete feature+scenario
lexer = gherkin.Lexer('''
Feature: Some descriptive text
In order to parse a Gherkin file
As a parser
I want to be able to parse scenarios
Even more text
Scenario: The user wants to describe a feature
''')
# When we run the lexer
tokens = lexer.run()
# Then we see it was broken down into the right list of tokens
tokens.should.equal([
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_LABEL, 'Feature'),
(3, gherkin.TOKEN_TEXT, 'Some descriptive text'),
(3, gherkin.TOKEN_NEWLINE, '\n'),
(4, gherkin.TOKEN_TEXT, 'In order to parse a Gherkin file'),
(4, gherkin.TOKEN_NEWLINE, '\n'),
(5, gherkin.TOKEN_TEXT, 'As a parser'),
(5, gherkin.TOKEN_NEWLINE, '\n'),
(6, gherkin.TOKEN_TEXT, 'I want to be able to parse scenarios'),
(6, gherkin.TOKEN_NEWLINE, '\n'),
(7, gherkin.TOKEN_NEWLINE, '\n'),
(8, gherkin.TOKEN_TEXT, 'Even more text'),
(8, gherkin.TOKEN_NEWLINE, '\n'),
(9, gherkin.TOKEN_NEWLINE, '\n'),
(10, gherkin.TOKEN_LABEL, 'Scenario'),
(10, gherkin.TOKEN_TEXT, 'The user wants to describe a feature'),
(10, gherkin.TOKEN_NEWLINE, '\n'),
(11, gherkin.TOKEN_EOF, '')
])
def test_lex_text_with_steps():
"Lexer.run() Should be able to tokenize steps"
# Given a lexer loaded with feature+background+scenario+steps
lexer = gherkin.Lexer('''\
Feature: Feature title
feature description
Background: Some background
about the problem
Scenario: Scenario title
Given first step
When second step
Then third step
''')
# When we run the lexer
tokens = lexer.run()
# Then we see that everything, including the steps was properly
# tokenized
tokens.should.equal([
(1, gherkin.TOKEN_LABEL, 'Feature'),
(1, gherkin.TOKEN_TEXT, 'Feature title'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_TEXT, 'feature description'),
(2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_LABEL, 'Background'),
(3, gherkin.TOKEN_TEXT, 'Some background'),
(3, gherkin.TOKEN_NEWLINE, '\n'),
(4, gherkin.TOKEN_TEXT, 'about the problem'),
(4, gherkin.TOKEN_NEWLINE, '\n'),
(5, gherkin.TOKEN_LABEL, 'Scenario'),
(5, gherkin.TOKEN_TEXT, 'Scenario title'),
(5, gherkin.TOKEN_NEWLINE, '\n'),
(6, gherkin.TOKEN_TEXT, 'Given first step'),
(6, gherkin.TOKEN_NEWLINE, '\n'),
(7, gherkin.TOKEN_TEXT, 'When second step'),
(7, gherkin.TOKEN_NEWLINE, '\n'),
(8, gherkin.TOKEN_TEXT, 'Then third step'),
(8, gherkin.TOKEN_NEWLINE, '\n'),
(9, gherkin.TOKEN_EOF, '')
])
def test_lex_load_languages():
"Lexer.run() Should be able to parse different languages"
# Given the following lexer instance loaded with another language
lexer = gherkin.Lexer('''# language: pt-br
Funcionalidade: Interpretador para gherkin
Para escrever testes de aceitação
Como um programador
Preciso de uma ferramenta de BDD
Contexto:
Dado que a variavel "X" contém o número 2
Cenário: Lanche
Dada uma maçã
Quando mordida
Então a fome passa
''')
# When we run the lexer
tokens = lexer.run()
# Then the following list of tokens is generated
tokens.should.equal([
(1, gherkin.TOKEN_META_LABEL, 'language'),
(1, gherkin.TOKEN_META_VALUE, 'pt-br'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_LABEL, 'Funcionalidade'),
(3, gherkin.TOKEN_TEXT, 'Interpretador para gherkin'),
(3, gherkin.TOKEN_NEWLINE, '\n'),
(4, gherkin.TOKEN_TEXT, 'Para escrever testes de aceitação'),
(4, gherkin.TOKEN_NEWLINE, '\n'),
(5, gherkin.TOKEN_TEXT, 'Como um programador'),
(5, gherkin.TOKEN_NEWLINE, '\n'),
(6, gherkin.TOKEN_TEXT, 'Preciso de uma ferramenta de BDD'),
(6, gherkin.TOKEN_NEWLINE, '\n'),
(7, gherkin.TOKEN_LABEL, 'Contexto'),
(7, gherkin.TOKEN_NEWLINE, '\n'),
(8, gherkin.TOKEN_TEXT, 'Dado que a variavel "X" contém o número 2'),
(8, gherkin.TOKEN_NEWLINE, '\n'),
(9, gherkin.TOKEN_LABEL, 'Cenário'),
(9, gherkin.TOKEN_TEXT, 'Lanche'),
(9, gherkin.TOKEN_NEWLINE, '\n'),
(10, gherkin.TOKEN_TEXT, 'Dada uma maçã'),
(10, gherkin.TOKEN_NEWLINE, '\n'),
(11, gherkin.TOKEN_TEXT, 'Quando mordida'),
(11, gherkin.TOKEN_NEWLINE, '\n'),
(12, gherkin.TOKEN_TEXT, 'Então a fome passa'),
(12, gherkin.TOKEN_NEWLINE, '\n'),
(13, gherkin.TOKEN_EOF, '')
])
def test_lex_tables():
"Lexer.run() Should be able to lex tables"
# Given the following lexer loaded with an examples label followed
# by a table that ends before '\n'
lexer = gherkin.Lexer('''\
Examples:
| column1 | column2 | ''')
# When we run the lexer
tokens = lexer.run()
# Then we see the scenario outline case was properly parsed
tokens.should.equal([
(1, gherkin.TOKEN_LABEL, 'Examples'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_TABLE_COLUMN, 'column1'),
(2, gherkin.TOKEN_TABLE_COLUMN, 'column2'),
(2, gherkin.TOKEN_EOF, ''),
])
def test_lex_tables_full():
"Lexer.run() Should be able to lex scenario outlines"
lexer = gherkin.Lexer('''\
Feature: gherkin has steps with examples
Scenario Outline: Add two numbers
Given I have <input_1> and <input_2> the calculator
When I press "Sum"!
Then the result should be <output> on the screen
Examples:
| input_1 | input_2 | output |
| 20 | 30 | 50 |
| 0 | 40 | 40 |
''')
# When we run the lexer
tokens = lexer.run()
# Then we see the scenario outline case was properly parsed
tokens.should.equal([
(1, gherkin.TOKEN_LABEL, 'Feature'),
(1, gherkin.TOKEN_TEXT, 'gherkin has steps with examples'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_LABEL, 'Scenario Outline'),
(2, gherkin.TOKEN_TEXT, 'Add two numbers'),
(2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_TEXT, 'Given I have <input_1> and <input_2> the calculator'),
(3, gherkin.TOKEN_NEWLINE, '\n'),
(4, gherkin.TOKEN_TEXT, 'When I press "Sum"!'),
(4, gherkin.TOKEN_NEWLINE, '\n'),
(5, gherkin.TOKEN_TEXT, 'Then the result should be <output> on the screen'),
(5, gherkin.TOKEN_NEWLINE, '\n'),
(6, gherkin.TOKEN_LABEL, 'Examples'),
(6, gherkin.TOKEN_NEWLINE, '\n'),
(7, gherkin.TOKEN_TABLE_COLUMN, 'input_1'),
(7, gherkin.TOKEN_TABLE_COLUMN, 'input_2'),
(7, gherkin.TOKEN_TABLE_COLUMN, 'output'),
(7, gherkin.TOKEN_NEWLINE, '\n'),
(8, gherkin.TOKEN_TABLE_COLUMN, '20'),
(8, gherkin.TOKEN_TABLE_COLUMN, '30'),
(8, gherkin.TOKEN_TABLE_COLUMN, '50'),
(8, gherkin.TOKEN_NEWLINE, '\n'),
(9, gherkin.TOKEN_TABLE_COLUMN, '0'),
(9, gherkin.TOKEN_TABLE_COLUMN, '40'),
(9, gherkin.TOKEN_TABLE_COLUMN, '40'),
(9, gherkin.TOKEN_NEWLINE, '\n'),
(10, gherkin.TOKEN_EOF, '')
])
def test_lex_tables_within_steps():
"Lexer.run() Should be able to lex example tables from steps"
# Given a lexer loaded with steps that contain example tables
lexer = gherkin.Lexer('''\
Feature: Check models existence
Background:
Given I have a garden in the database:
| @name | area | raining |
| Secret Garden | 45 | false |
And I have gardens in the database:
| name | area | raining |
| Octopus' Garden | 120 | true |
''')
# When we run the lexer
tokens = lexer.run()
# Then we see that steps that contain : will be identified as
# labels
tokens.should.equal([
(1, gherkin.TOKEN_LABEL, 'Feature'),
(1, gherkin.TOKEN_TEXT, 'Check models existence'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_LABEL, 'Background'),
(2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_LABEL, 'Given I have a garden in the database'),
(3, gherkin.TOKEN_NEWLINE, '\n'),
(4, gherkin.TOKEN_TABLE_COLUMN, '@name'),
(4, gherkin.TOKEN_TABLE_COLUMN, 'area'),
(4, gherkin.TOKEN_TABLE_COLUMN, 'raining'),
(4, gherkin.TOKEN_NEWLINE, '\n'),
(5, gherkin.TOKEN_TABLE_COLUMN, 'Secret Garden'),
(5, gherkin.TOKEN_TABLE_COLUMN, '45'),
(5, gherkin.TOKEN_TABLE_COLUMN, 'false'),
(5, gherkin.TOKEN_NEWLINE, '\n'),
(6, gherkin.TOKEN_LABEL, 'And I have gardens in the database'),
(6, gherkin.TOKEN_NEWLINE, '\n'),
(7, gherkin.TOKEN_TABLE_COLUMN, 'name'),
(7, gherkin.TOKEN_TABLE_COLUMN, 'area'),
(7, gherkin.TOKEN_TABLE_COLUMN, 'raining'),
(7, gherkin.TOKEN_NEWLINE, '\n'),
(8, gherkin.TOKEN_TABLE_COLUMN, 'Octopus\' Garden'),
(8, gherkin.TOKEN_TABLE_COLUMN, '120'),
(8, gherkin.TOKEN_TABLE_COLUMN, 'true'),
(8, gherkin.TOKEN_NEWLINE, '\n'),
(9, gherkin.TOKEN_EOF, '')
])
def test_lex_multi_line_str():
"Lexer.run() Should be able to find multi quoted strings after labels"
# Given a lexer loaded with steps that contain example tables
lexer = gherkin.Lexer('''\
Given the following email template:
''\'Here we go with a pretty
big block of text
surrounded by triple quoted strings
''\'
And a cat picture
"""Now notice we didn't use (:) above
"""
''')
# When we run the lexer
tokens = lexer.run()
# Then we see that triple quoted strings are captured by the lexer
tokens.should.equal([
(1, gherkin.TOKEN_LABEL, 'Given the following email template'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_QUOTES, "'''"),
(2, gherkin.TOKEN_TEXT, '''Here we go with a pretty
big block of text
surrounded by triple quoted strings
'''),
(5, gherkin.TOKEN_QUOTES, "'''"),
(5, gherkin.TOKEN_NEWLINE, '\n'),
(6, gherkin.TOKEN_TEXT, 'And a cat picture'),
(6, gherkin.TOKEN_NEWLINE, '\n'),
(7, gherkin.TOKEN_QUOTES, '"""'),
(7, gherkin.TOKEN_TEXT, "Now notice we didn't use (:) above\n "),
(8, gherkin.TOKEN_QUOTES, '"""'),
(8, gherkin.TOKEN_NEWLINE, '\n'),
(9, gherkin.TOKEN_EOF, '')
])
def test_lex_tags_empty():
"Lexer.lex_tag() Should bail if we reach EOF"
# Given a lexer loaded with an empty string
lexer = gherkin.Lexer('')
# When we try to lex tags
lexer.lex_tag()
# Then we see we found no tokens
lexer.tokens.should.be.empty
def test_lex_tags():
"Lexer.run() Should be able to find tags"
# Given a lexer loaded with steps that contain example tables
lexer = gherkin.Lexer('''\
@tagged-feature
Feature: Parse tags
@tag1 @tag2
Scenario: Test
''')
# When we run the lexer
tokens = lexer.run()
# Then we see that triple quoted strings are captured by the lexer
tokens.should.equal([
(1, gherkin.TOKEN_TAG, 'tagged-feature'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_LABEL, 'Feature'),
(2, gherkin.TOKEN_TEXT, 'Parse tags'),
(2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_NEWLINE, '\n'),
(4, gherkin.TOKEN_TAG, 'tag1'),
(4, gherkin.TOKEN_TAG, 'tag2'),
(4, gherkin.TOKEN_NEWLINE, '\n'),
(5, gherkin.TOKEN_LABEL, 'Scenario'),
(5, gherkin.TOKEN_TEXT, 'Test'),
(5, gherkin.TOKEN_NEWLINE, '\n'),
(6, gherkin.TOKEN_EOF, ''),
])
def test_parse_metadata_empty():
Parser([(1, gherkin.TOKEN_EOF, '')]).parse_metadata().should.be.none
Parser([None]).parse_metadata().should.be.none
def test_parse_metadata_incomplete():
parser = Parser([
(1, gherkin.TOKEN_META_LABEL, 'language'),
(1, gherkin.TOKEN_EOF, ''),
])
parser.parse_metadata().should.be.none
def test_parse_metadata_syntax_error():
parser = Parser([
(1, gherkin.TOKEN_META_LABEL, 'language'),
(1, gherkin.TOKEN_TEXT, 'pt-br'),
])
parser.parse_metadata.when.called.should.throw(
SyntaxError, 'No value found for the meta-field `language\'')
def test_parse_metadata():
parser = Parser([
(1, gherkin.TOKEN_META_LABEL, 'language'),
(1, gherkin.TOKEN_META_VALUE, 'pt-br'),
])
metadata = parser.parse_metadata()
metadata.should.equal(Ast.Metadata(line=1, key='language', value='pt-br'))
def test_parse_empty_title():
parser = Parser([
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_TEXT, 'more text after title'),
])
feature = parser.parse_title()
feature.should.be.none
def test_parse_title():
parser = Parser([
(1, gherkin.TOKEN_TEXT, 'Scenario title'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
])
feature = parser.parse_title()
feature.should.equal(Ast.Text(line=1, text='Scenario title'))
def test_parse_table():
parser = Parser([
(1, gherkin.TOKEN_TABLE_COLUMN, 'name'),
(1, gherkin.TOKEN_TABLE_COLUMN, 'email'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'),
(2, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'),
(2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'),
(3, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'),
(3, gherkin.TOKEN_NEWLINE, '\n'),
(4, gherkin.TOKEN_LABEL, 'Scenario'),
(4, gherkin.TOKEN_EOF, ''),
])
feature = parser.parse_table()
feature.should.equal(Ast.Table(line=1, fields=[
['name', 'email'],
['Lincoln', '[email protected]'],
['Gabriel', '[email protected]'],
]))
def test_parse_background():
# Background: title
# Given two users in the database:
# | name | email |
# | Lincoln | [email protected] |
# | Gabriel | [email protected] |
# Scenario:
parser = Parser([
(1, gherkin.TOKEN_LABEL, 'Background'),
(1, gherkin.TOKEN_TEXT, 'title'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_LABEL, 'Given two users in the database'),
(2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_TABLE_COLUMN, 'name'),
(3, gherkin.TOKEN_TABLE_COLUMN, 'email'),
(3, gherkin.TOKEN_NEWLINE, '\n'),
(4, gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'),
(4, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'),
(4, gherkin.TOKEN_NEWLINE, '\n'),
(5, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'),
(5, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'),
(5, gherkin.TOKEN_NEWLINE, '\n'),
(6, gherkin.TOKEN_LABEL, 'Scenario'),
])
# When the background is parsed
feature = parser.parse_background()
# Then I see the output contains a valid background with a step
# with examples. Notice the scenario label is not returned
# anywhere here
feature.should.equal(Ast.Background(
line=1,
title=Ast.Text(line=1, text='title'),
steps=[
Ast.Step(
line=2,
title=Ast.Text(line=2, text='Given two users in the database'),
table=Ast.Table(line=3, fields=[
['name', 'email'],
['Lincoln', '[email protected]'],
['Gabriel', '[email protected]'],
]))
]))
## Scenarios
def teste_parse_scenario():
parser = Parser([
(1, gherkin.TOKEN_LABEL, 'Scenario'),
(1, gherkin.TOKEN_TEXT, 'Scenario title'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_TEXT, 'Given first step'),
])
feature = parser.parse_scenarios()
feature.should.equal([Ast.Scenario(
line=1,
title=Ast.Text(line=1, text='Scenario title'),
steps=[Ast.Step(line=2, title=Ast.Text(line=2, text='Given first step'))],
)])
def teste_parse_scenario_with_description():
parser = Parser([
(1, gherkin.TOKEN_LABEL, 'Scenario'),
(1, gherkin.TOKEN_TEXT, 'Scenario title'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_TEXT, 'Scenario description'),
(2, gherkin.TOKEN_TEXT, 'More description'),
(2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_TEXT, 'Given first step'),
])
feature = parser.parse_scenarios()
feature.should.equal([Ast.Scenario(
line=1,
title=Ast.Text(line=1, text='Scenario title'),
description=Ast.Text( line=2, text='Scenario description More description'),
steps=[Ast.Step(line=3, title=Ast.Text(line=3, text='Given first step'))],
)])
def test_parse_scenario_outline_with_examples():
""
# Given a parser loaded with the following gherkin document:
#
# Scenario Outline: Plant a tree
# Given the <name> of a garden
# When I plant a tree
# And wait for <num_days> days
# Then I see it growing
# Examples:
# | name | num_days |
# | Secret | 2 |
# | Octopus | 5 |
parser = Parser([
(1, gherkin.TOKEN_LABEL, 'Scenario Outline'),
(1, gherkin.TOKEN_TEXT, 'Plant a tree'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_TEXT, 'Given the <name> of a garden'),
(2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_TEXT, 'When I plant a tree'),
(3, gherkin.TOKEN_NEWLINE, '\n'),
(4, gherkin.TOKEN_TEXT, 'And wait for <num_days> days'),
(4, gherkin.TOKEN_NEWLINE, '\n'),
(5, gherkin.TOKEN_TEXT, 'Then I see it growing'),
(5, gherkin.TOKEN_NEWLINE, '\n'),
(6, gherkin.TOKEN_LABEL, 'Examples'),
(6, gherkin.TOKEN_NEWLINE, '\n'),
(7, gherkin.TOKEN_TABLE_COLUMN, 'name'),
(7, gherkin.TOKEN_TABLE_COLUMN, 'num_days'),
(7, gherkin.TOKEN_NEWLINE, '\n'),
(8, gherkin.TOKEN_TABLE_COLUMN, 'Secret'),
(8, gherkin.TOKEN_TABLE_COLUMN, '2'),
(8, gherkin.TOKEN_NEWLINE, '\n'),
(9, gherkin.TOKEN_TABLE_COLUMN, 'Octopus'),
(9, gherkin.TOKEN_TABLE_COLUMN, '5'),
(9, gherkin.TOKEN_NEWLINE, '\n'),
(10, gherkin.TOKEN_EOF, '')
])
scenarios = parser.parse_scenarios()
scenarios.should.equal([
Ast.ScenarioOutline(
line=1,
title=Ast.Text(line=1, text='Plant a tree'),
steps=[Ast.Step(line=2, title=Ast.Text(line=2, text='Given the <name> of a garden')),
Ast.Step(line=3, title=Ast.Text(line=3, text='When I plant a tree')),
Ast.Step(line=4, title=Ast.Text(line=4, text='And wait for <num_days> days')),
Ast.Step(line=5, title=Ast.Text(line=5, text='Then I see it growing'))],
examples=Ast.Examples(line=6, table=Ast.Table(line=7, fields=[
['name', 'num_days'],
['Secret', '2'],
['Octopus', '5'],
]))
)])
def test_parse_not_starting_with_feature():
parser = gherkin.Parser(gherkin.Lexer('''
Scenario: Scenario title
Given first step
When second step
Then third step
''').run())
parser.parse_feature.when.called.should.throw(
SyntaxError,
"Feature expected in the beginning of the file, "
"found `Scenario' though.")
def test_parse_feature_two_backgrounds():
parser = gherkin.Parser(gherkin.Lexer('''
Feature: Feature title
feature description
Background: Some background
about the problem
Background: Some other background
will raise an exception
Scenario: Scenario title
Given first step
When second step
Then third step
''').run())
parser.parse_feature.when.called.should.throw(
SyntaxError,
"`Background' should not be declared here, Scenario or Scenario Outline expected")
def test_parse_feature_background_wrong_place():
parser = gherkin.Parser(gherkin.Lexer('''
Feature: Feature title
feature description
Scenario: Scenario title
Given first step
When second step
Then third step
Background: Some background
about the problem
''').run())
parser.parse_feature.when.called.should.throw(
SyntaxError,
"`Background' should not be declared here, Scenario or Scenario Outline expected")
def test_parse_feature():
parser = Parser([
(1, gherkin.TOKEN_LABEL, 'Feature'),
(1, gherkin.TOKEN_TEXT, 'Feature title'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_TEXT, 'feature description'),
(2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_LABEL, 'Background'),
(3, gherkin.TOKEN_TEXT, 'Some background'),
(3, gherkin.TOKEN_NEWLINE, '\n'),
(4, gherkin.TOKEN_TEXT, 'Given the problem'),
(4, gherkin.TOKEN_NEWLINE, '\n'),
(5, gherkin.TOKEN_LABEL, 'Scenario'),
(5, gherkin.TOKEN_TEXT, 'Scenario title'),
(5, gherkin.TOKEN_NEWLINE, '\n'),
(6, gherkin.TOKEN_TEXT, 'Given first step'),
(6, gherkin.TOKEN_NEWLINE, '\n'),
(7, gherkin.TOKEN_LABEL, 'Scenario'),
(7, gherkin.TOKEN_TEXT, 'Another scenario'),
(7, gherkin.TOKEN_NEWLINE, '\n'),
(8, gherkin.TOKEN_TEXT, 'Given this step'),
(8, gherkin.TOKEN_NEWLINE, '\n'),
(9, gherkin.TOKEN_TEXT, 'When we take another step'),
(9, gherkin.TOKEN_NEWLINE, '\n'),
(10, gherkin.TOKEN_EOF, ''),
])
feature = parser.parse_feature()
feature.should.equal(Ast.Feature(
line=1,
title=Ast.Text(line=1, text='Feature title'),
description=Ast.Text(line=2, text='feature description'),
background=Ast.Background(
line=3,
title=Ast.Text(line=3, text='Some background'),
steps=[Ast.Step(line=4, title=Ast.Text(line=4, text='Given the problem'))]),
scenarios=[
Ast.Scenario(line=5,
title=Ast.Text(line=5, text='Scenario title'),
steps=[Ast.Step(line=6, title=Ast.Text(line=6, text='Given first step'))]),
Ast.Scenario(line=7,
title=Ast.Text(line=7, text='Another scenario'),
steps=[Ast.Step(line=8, title=Ast.Text(line=8, text='Given this step')),
Ast.Step(line=9, title=Ast.Text(line=9, text='When we take another step'))]),
],
))
def test_parse_tables_within_steps():
"Lexer.run() Should be able to parse example tables from steps"
# Given a parser loaded with steps that contain example tables
'''Feature: Check models existence
Background:
Given I have a garden in the database:
| @name | area | raining |
| Secret Garden | 45 | false |
And I have gardens in the database:
| name | area | raining |
| Octopus' Garden | 120 | true |
Scenario: Plant a tree
Given the <name> of a garden
When I plant a tree
And wait for <num_days> days
Then I see it growing
'''
parser = Parser([
(1, gherkin.TOKEN_LABEL, 'Feature'),
(1, gherkin.TOKEN_TEXT, 'Check models existence'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_LABEL, 'Background'),
(2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_LABEL, 'Given I have a garden in the database'),
(3, gherkin.TOKEN_NEWLINE, '\n'),
(4, gherkin.TOKEN_TABLE_COLUMN, '@name'),
(4, gherkin.TOKEN_TABLE_COLUMN, 'area'),
(4, gherkin.TOKEN_TABLE_COLUMN, 'raining'),
(4, gherkin.TOKEN_NEWLINE, '\n'),
(5, gherkin.TOKEN_TABLE_COLUMN, 'Secret Garden'),
(5, gherkin.TOKEN_TABLE_COLUMN, '45'),
(5, gherkin.TOKEN_TABLE_COLUMN, 'false'),
(5, gherkin.TOKEN_NEWLINE, '\n'),
(6, gherkin.TOKEN_LABEL, 'And I have gardens in the database'),
(6, gherkin.TOKEN_NEWLINE, '\n'),
(7, gherkin.TOKEN_TABLE_COLUMN, 'name'),
(7, gherkin.TOKEN_TABLE_COLUMN, 'area'),
(7, gherkin.TOKEN_TABLE_COLUMN, 'raining'),
(7, gherkin.TOKEN_NEWLINE, '\n'),
(8, gherkin.TOKEN_TABLE_COLUMN, "Octopus' Garden"),
(8, gherkin.TOKEN_TABLE_COLUMN, '120'),
(8, gherkin.TOKEN_TABLE_COLUMN, 'true'),
(8, gherkin.TOKEN_NEWLINE, '\n'),
(9, gherkin.TOKEN_LABEL, 'Scenario'),
(9, gherkin.TOKEN_TEXT, 'Plant a tree'),
(9, gherkin.TOKEN_NEWLINE, '\n'),
(10, gherkin.TOKEN_TEXT, 'Given the <name> of a garden'),
(10, gherkin.TOKEN_NEWLINE, '\n'),
(11, gherkin.TOKEN_TEXT, 'When I plant a tree'),
(11, gherkin.TOKEN_NEWLINE, '\n'),
(12, gherkin.TOKEN_TEXT, 'And wait for <num_days> days'),
(12, gherkin.TOKEN_NEWLINE, '\n'),
(13, gherkin.TOKEN_TEXT, 'Then I see it growing'),
(13, gherkin.TOKEN_NEWLINE, '\n'),
(14, gherkin.TOKEN_EOF, '')
])
feature = parser.parse_feature()
feature.should.equal(Ast.Feature(
line=1,
title=Ast.Text(line=1, text='Check models existence'),
background=Ast.Background(
line=2,
steps=[
Ast.Step(
line=3,
title=Ast.Text(line=3, text='Given I have a garden in the database'),
table=Ast.Table(line=4, fields=[
['@name', 'area', 'raining'],
['Secret Garden', '45', 'false']])),
Ast.Step(
line=6,
title=Ast.Text(line=6, text='And I have gardens in the database'),
table=Ast.Table(line=7, fields=[
['name', 'area', 'raining'],
['Octopus\' Garden', '120', 'true']])),
]
),
scenarios=[
Ast.Scenario(
title=Ast.Text(line=9, text='Plant a tree'),
line=9,
steps=[
Ast.Step(line=10, title=Ast.Text(line=10, text='Given the <name> of a garden')),
Ast.Step(line=11, title=Ast.Text(line=11, text='When I plant a tree')),
Ast.Step(line=12, title=Ast.Text(line=12, text='And wait for <num_days> days')),
Ast.Step(line=13, title=Ast.Text(line=13, text='Then I see it growing'))
])
],
))
def test_parse_quoted_strings_on_steps():
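    "Parser.parse_steps() Should attach triple quoted strings to the steps they follow"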
# Given a parser loaded with the following Gherkin document
# Given the following email template:
# '''Here we go with a pretty
# big block of text
# surrounded by triple quoted strings
# '''
# And a cat picture
# """Now notice we didn't use (:) above
# """
parser = Parser([
(1, gherkin.TOKEN_LABEL, 'Given the following email template'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_QUOTES, "'''"),
(2, gherkin.TOKEN_TEXT, '''Here we go with a pretty
big block of text
surrounded by triple quoted strings
'''),
(5, gherkin.TOKEN_QUOTES, "'''"),
(5, gherkin.TOKEN_NEWLINE, '\n'),
(6, gherkin.TOKEN_TEXT, 'And a cat picture'),
(6, gherkin.TOKEN_NEWLINE, '\n'),
(7, gherkin.TOKEN_QUOTES, '"""'),
(7, gherkin.TOKEN_TEXT, "Now notice we didn't use (:) above\n "),
(8, gherkin.TOKEN_QUOTES, '"""'),
(8, gherkin.TOKEN_NEWLINE, '\n'),
(9, gherkin.TOKEN_EOF, '')
])
steps = parser.parse_steps()
steps.should.equal([
Ast.Step(
line=1,
title=Ast.Text(line=1, text='Given the following email template'),
text=Ast.Text(line=2, text='''Here we go with a pretty
big block of text
surrounded by triple quoted strings
''')),
Ast.Step(
line=6,
title=Ast.Text(line=6, text='And a cat picture'),
text=Ast.Text(line=7, text="Now notice we didn't use (:) above\n "))])
def test_parse_tags():
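    "Parser.parse_tags() Should read tags spread over multiple lines"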
parser = Parser([
(1, gherkin.TOKEN_TAG, 'tag1'),
(1, gherkin.TOKEN_TAG, 'tag2'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_TAG, 'tag3'),
(2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_LABEL, 'Feature'),
])
tags = parser.parse_tags()
tags.should.equal(['tag1', 'tag2', 'tag3'])
def test_parse_tags_on_scenario_outline_examples():
"Parser should allow tags to be defined in examples"
# Given a parser loaded with a document that contains tags on
# scenario outline examples
# @tagged-feature
# Feature: Parse tags
# @tag1 @tag2
# Scenario Outline: Test
# @example-tag1
# @example-tag2
# Examples:
# | Header |
parser = Parser([
(1, gherkin.TOKEN_TAG, 'tagged-feature'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_LABEL, 'Feature'),
(2, gherkin.TOKEN_TEXT, 'Parse tags'),
(2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_TAG, 'tag1'),
(3, gherkin.TOKEN_TAG, 'tag2'),
(3, gherkin.TOKEN_NEWLINE, '\n'),
(4, gherkin.TOKEN_LABEL, 'Scenario Outline'),
(4, gherkin.TOKEN_TEXT, 'Test'),
(4, gherkin.TOKEN_NEWLINE, '\n'),
(5, gherkin.TOKEN_TAG, 'example-tag1'),
(5, gherkin.TOKEN_NEWLINE, '\n'),
(6, gherkin.TOKEN_TAG, 'example-tag2'),
(6, gherkin.TOKEN_NEWLINE, '\n'),
(7, gherkin.TOKEN_LABEL, 'Examples'),
(7, gherkin.TOKEN_NEWLINE, '\n'),
(8, gherkin.TOKEN_TABLE_COLUMN, 'Header'),
(8, gherkin.TOKEN_NEWLINE, '\n'),
(9, gherkin.TOKEN_EOF, ''),
])
# When I parse the document
feature = parser.parse_feature()
# Then I see all the tags were found
feature.should.equal(Ast.Feature(
line=2,
title=Ast.Text(line=2, text='Parse tags'),
tags=['tagged-feature'],
scenarios=[Ast.ScenarioOutline(
line=4,
title=Ast.Text(line=4, text='Test'),
tags=['tag1', 'tag2'],
examples=Ast.Examples(
line=7,
tags=['example-tag1', 'example-tag2'],
table=Ast.Table(line=8, fields=[['Header']])),
)]))
def test_parse_tags_on_feature_and_scenario():
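    "Parser.parse_feature() Should collect tags declared on both the feature and its scenarios"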
# Given a parser loaded with a gherkin document with one tag on
# the feature and two tags on a scenario:
#
# @tagged-feature
# Feature: Parse tags
#
# @tag1 @tag2
# Scenario: Test
parser = Parser([
(1, gherkin.TOKEN_TAG, 'tagged-feature'),
(1, gherkin.TOKEN_NEWLINE, '\n'),
(2, gherkin.TOKEN_LABEL, 'Feature'),
(2, gherkin.TOKEN_TEXT, 'Parse tags'),
(2, gherkin.TOKEN_NEWLINE, '\n'),
(3, gherkin.TOKEN_NEWLINE, '\n'),
(4, gherkin.TOKEN_TAG, 'tag1'),
(4, gherkin.TOKEN_TAG, 'tag2'),
(4, gherkin.TOKEN_NEWLINE, '\n'),
(5, gherkin.TOKEN_LABEL, 'Scenario'),
(5, gherkin.TOKEN_TEXT, 'Test'),
(6, gherkin.TOKEN_NEWLINE, '\n'),
(7, gherkin.TOKEN_EOF, ''),
])
feature = parser.parse_feature()
feature.should.equal(Ast.Feature(
line=2,
title=Ast.Text(line=2, text='Parse tags'),
tags=['tagged-feature'],
scenarios=[Ast.Scenario(
line=5,
title=Ast.Text(line=5, text='Test'),
tags=['tag1', 'tag2'])]))
def test_ast_node_equal():
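    "Ast.Node() Should consider nodes with different attributes as not equal"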
# Given two different AST nodes
n1 = Ast.Node()
n2 = Ast.Node()
# And different attributes to each node
n1.name = 'Lincoln'
n2.color = 'green'
# When I compare them
equal = n1 == n2
# Then I see they're different
equal.should.be.false
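# The comparison above depends on Ast.Node implementing structural equality.
# The class below is a minimal sketch of how such an __eq__ could look,
# assuming nodes simply compare their attribute dictionaries. It is
# illustrative only: it is not used by the tests and is not necessarily how
# gherkin.Ast.Node is actually implemented.
class _NodeEqualitySketch(object):
    def __eq__(self, other):
        # Equal when the other object is of the same kind and carries exactly
        # the same attributes with the same values.
        return isinstance(other, _NodeEqualitySketch) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        # Complement of __eq__ so that `!=` stays consistent with `==`.
        return not self.__eq__(other)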
|
flexible
|
{
"blob_id": "44649e44da4eb80e7f869ff906798d5db493b913",
"index": 4415,
"step-1": "<mask token>\n\n\ndef test_lex_comment_no_newline():\n lexer = gherkin.Lexer(' test comment')\n new_state = lexer.lex_comment_metadata_value()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_META_VALUE, 'test comment')])\n new_state.should.equal(lexer.lex_text)\n\n\ndef test_lex_comment_until_newline():\n \"\"\"Lexer.lex_comment() Should parse comments until the newline character\"\"\"\n lexer = gherkin.Lexer('# one line\\n# another line')\n tokens = lexer.run()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_COMMENT, 'one line'), (1,\n gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_COMMENT,\n 'another line'), (2, gherkin.TOKEN_EOF, '')])\n\n\n<mask token>\n\n\ndef test_lex_text_with_labels():\n \"\"\"Lexer.run() Should be able to tokenize a feature with a scenario\"\"\"\n lexer = gherkin.Lexer(\n \"\"\"\n\nFeature: Some descriptive text\n In order to parse a Gherkin file\n As a parser\n I want to be able to parse scenarios\n\n Even more text\n\n Scenario: The user wants to describe a feature\n\"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL, 'Feature'), (3,\n gherkin.TOKEN_TEXT, 'Some descriptive text'), (3, gherkin.\n TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_TEXT,\n 'In order to parse a Gherkin file'), (4, gherkin.TOKEN_NEWLINE,\n '\\n'), (5, gherkin.TOKEN_TEXT, 'As a parser'), (5, gherkin.\n TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_TEXT,\n 'I want to be able to parse scenarios'), (6, gherkin.TOKEN_NEWLINE,\n '\\n'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.TOKEN_TEXT,\n 'Even more text'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.\n TOKEN_NEWLINE, '\\n'), (10, gherkin.TOKEN_LABEL, 'Scenario'), (10,\n gherkin.TOKEN_TEXT, 'The user wants to describe a feature'), (10,\n gherkin.TOKEN_NEWLINE, '\\n'), (11, gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_text_with_steps():\n \"\"\"Lexer.run() Should be able to tokenize steps\"\"\"\n lexer = gherkin.Lexer(\n \"\"\"Feature: Feature title\n feature description\n Background: Some background\n about the problem\n Scenario: Scenario title\n Given first step\n When second step\n Then third step\n\"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'Feature title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'feature description'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL, 'Background'), (3,\n gherkin.TOKEN_TEXT, 'Some background'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TEXT, 'about the problem'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5,\n gherkin.TOKEN_TEXT, 'Scenario title'), (5, gherkin.TOKEN_NEWLINE,\n '\\n'), (6, gherkin.TOKEN_TEXT, 'Given first step'), (6, gherkin.\n TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_TEXT, 'When second step'),\n (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.TOKEN_TEXT,\n 'Then third step'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.\n TOKEN_EOF, '')])\n\n\ndef test_lex_load_languages():\n \"\"\"Lexer.run() Should be able to parse different languages\"\"\"\n lexer = gherkin.Lexer(\n \"\"\"# language: pt-br\n\n Funcionalidade: Interpretador para gherkin\n Para escrever testes de aceitação\n Como um programador\n Preciso de uma ferramenta de BDD\n Contexto:\n Dado que a variavel \"X\" contém o número 2\n Cenário: Lanche\n Dada uma maçã\n Quando mordida\n Então a fome passa\n \"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, 
gherkin.TOKEN_META_LABEL, 'language'), (1,\n gherkin.TOKEN_META_VALUE, 'pt-br'), (1, gherkin.TOKEN_NEWLINE, '\\n'\n ), (2, gherkin.TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL,\n 'Funcionalidade'), (3, gherkin.TOKEN_TEXT,\n 'Interpretador para gherkin'), (3, gherkin.TOKEN_NEWLINE, '\\n'), (4,\n gherkin.TOKEN_TEXT, 'Para escrever testes de aceitação'), (4,\n gherkin.TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_TEXT,\n 'Como um programador'), (5, gherkin.TOKEN_NEWLINE, '\\n'), (6,\n gherkin.TOKEN_TEXT, 'Preciso de uma ferramenta de BDD'), (6,\n gherkin.TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_LABEL, 'Contexto'),\n (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.TOKEN_TEXT,\n 'Dado que a variavel \"X\" contém o número 2'), (8, gherkin.\n TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_LABEL, 'Cenário'), (9,\n gherkin.TOKEN_TEXT, 'Lanche'), (9, gherkin.TOKEN_NEWLINE, '\\n'), (\n 10, gherkin.TOKEN_TEXT, 'Dada uma maçã'), (10, gherkin.\n TOKEN_NEWLINE, '\\n'), (11, gherkin.TOKEN_TEXT, 'Quando mordida'), (\n 11, gherkin.TOKEN_NEWLINE, '\\n'), (12, gherkin.TOKEN_TEXT,\n 'Então a fome passa'), (12, gherkin.TOKEN_NEWLINE, '\\n'), (13,\n gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_tables():\n \"\"\"Lexer.run() Should be able to lex tables\"\"\"\n lexer = gherkin.Lexer(\"\"\" Examples:\n | column1 | column2 | \"\"\")\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Examples'), (1, gherkin.\n TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_TABLE_COLUMN, 'column1'), (\n 2, gherkin.TOKEN_TABLE_COLUMN, 'column2'), (2, gherkin.TOKEN_EOF, '')])\n\n\n<mask token>\n\n\ndef test_lex_tables_within_steps():\n \"\"\"Lexer.run() Should be able to lex example tables from steps\"\"\"\n lexer = gherkin.Lexer(\n \"\"\"\tFeature: Check models existence\n\t\tBackground:\n\t Given I have a garden in the database:\n\t | @name | area | raining |\n\t | Secret Garden | 45 | false |\n\t And I have gardens in the database:\n\t | name | area | raining |\n\t | Octopus' Garden | 120 | true |\n \"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'Check models existence'), (1, gherkin.TOKEN_NEWLINE,\n '\\n'), (2, gherkin.TOKEN_LABEL, 'Background'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL,\n 'Given I have a garden in the database'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TABLE_COLUMN, '@name'), (4, gherkin.\n TOKEN_TABLE_COLUMN, 'area'), (4, gherkin.TOKEN_TABLE_COLUMN,\n 'raining'), (4, gherkin.TOKEN_NEWLINE, '\\n'), (5, gherkin.\n TOKEN_TABLE_COLUMN, 'Secret Garden'), (5, gherkin.\n TOKEN_TABLE_COLUMN, '45'), (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_LABEL,\n 'And I have gardens in the database'), (6, gherkin.TOKEN_NEWLINE,\n '\\n'), (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.\n TOKEN_TABLE_COLUMN, 'area'), (7, gherkin.TOKEN_TABLE_COLUMN,\n 'raining'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.\n TOKEN_TABLE_COLUMN, \"Octopus' Garden\"), (8, gherkin.\n TOKEN_TABLE_COLUMN, '120'), (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_multi_line_str():\n \"\"\"Lexer.run() Should be able to find multi quoted strings after labels\"\"\"\n lexer = gherkin.Lexer(\n \"\"\" Given the following email template:\n '''Here we go with a pretty\n big block of text\n surrounded by triple quoted strings\n '''\n And a cat picture\n \"\"\\\"Now notice we didn't use (:) above\n \"\"\\\"\n \"\"\"\n )\n 
tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL,\n 'Given the following email template'), (1, gherkin.TOKEN_NEWLINE,\n '\\n'), (2, gherkin.TOKEN_QUOTES, \"'''\"), (2, gherkin.TOKEN_TEXT,\n \"\"\"Here we go with a pretty\n big block of text\n surrounded by triple quoted strings\n \"\"\"\n ), (5, gherkin.TOKEN_QUOTES, \"'''\"), (5, gherkin.TOKEN_NEWLINE,\n '\\n'), (6, gherkin.TOKEN_TEXT, 'And a cat picture'), (6, gherkin.\n TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_QUOTES, '\"\"\"'), (7, gherkin\n .TOKEN_TEXT, \"\"\"Now notice we didn't use (:) above\n \"\"\"), (8,\n gherkin.TOKEN_QUOTES, '\"\"\"'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9,\n gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_tags_empty():\n \"\"\"Lexer.lex_tag() Should bail if we reach EOF\"\"\"\n lexer = gherkin.Lexer('')\n lexer.lex_tag()\n lexer.tokens.should.be.empty\n\n\ndef test_lex_tags():\n \"\"\"Lexer.run() Should be able to find tags\"\"\"\n lexer = gherkin.Lexer(\n \"\"\" @tagged-feature\n Feature: Parse tags\n\n @tag1 @tag2\n Scenario: Test\n \"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_TAG, 'tagged-feature'), (1,\n gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_LABEL, 'Feature'),\n (2, gherkin.TOKEN_TEXT, 'Parse tags'), (2, gherkin.TOKEN_NEWLINE,\n '\\n'), (3, gherkin.TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_TAG,\n 'tag1'), (4, gherkin.TOKEN_TAG, 'tag2'), (4, gherkin.TOKEN_NEWLINE,\n '\\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5, gherkin.TOKEN_TEXT,\n 'Test'), (5, gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_EOF, '')])\n\n\ndef test_parse_metadata_empty():\n Parser([(1, gherkin.TOKEN_EOF, '')]).parse_metadata().should.be.none\n Parser([None]).parse_metadata().should.be.none\n\n\ndef test_parse_metadata_incomplete():\n parser = Parser([(1, gherkin.TOKEN_META_LABEL, 'language'), (1, gherkin\n .TOKEN_EOF, '')])\n parser.parse_metadata().should.be.none\n\n\n<mask token>\n\n\ndef test_parse_empty_title():\n parser = Parser([(1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.\n TOKEN_TEXT, 'more text after title')])\n feature = parser.parse_title()\n feature.should.be.none\n\n\n<mask token>\n\n\ndef test_parse_background():\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Background'), (1, gherkin.\n TOKEN_TEXT, 'title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin\n .TOKEN_LABEL, 'Given two users in the database'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_TABLE_COLUMN, 'name'), (3,\n gherkin.TOKEN_TABLE_COLUMN, 'email'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'), (4, gherkin.\n TOKEN_TABLE_COLUMN, '[email protected]'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'), (\n 5, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'), (5,\n gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_LABEL, 'Scenario')])\n feature = parser.parse_background()\n feature.should.equal(Ast.Background(line=1, title=Ast.Text(line=1, text\n ='title'), steps=[Ast.Step(line=2, title=Ast.Text(line=2, text=\n 'Given two users in the database'), table=Ast.Table(line=3, fields=\n [['name', 'email'], ['Lincoln', '[email protected]'], ['Gabriel',\n '[email protected]']]))]))\n\n\ndef teste_parse_scenario():\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario'), (1, gherkin.\n TOKEN_TEXT, 'Scenario title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'Given first step')])\n feature = parser.parse_scenarios()\n feature.should.equal([Ast.Scenario(line=1, title=Ast.Text(line=1, text=\n 'Scenario title'), 
steps=[Ast.Step(line=2, title=Ast.Text(line=2,\n text='Given first step'))])])\n\n\ndef teste_parse_scenario_with_description():\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario'), (1, gherkin.\n TOKEN_TEXT, 'Scenario title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'Scenario description'), (2, gherkin.TOKEN_TEXT,\n 'More description'), (2, gherkin.TOKEN_NEWLINE, '\\n'), (3, gherkin.\n TOKEN_TEXT, 'Given first step')])\n feature = parser.parse_scenarios()\n feature.should.equal([Ast.Scenario(line=1, title=Ast.Text(line=1, text=\n 'Scenario title'), description=Ast.Text(line=2, text=\n 'Scenario description More description'), steps=[Ast.Step(line=3,\n title=Ast.Text(line=3, text='Given first step'))])])\n\n\ndef test_parse_scenario_outline_with_examples():\n \"\"\"\"\"\"\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario Outline'), (1,\n gherkin.TOKEN_TEXT, 'Plant a tree'), (1, gherkin.TOKEN_NEWLINE,\n '\\n'), (2, gherkin.TOKEN_TEXT, 'Given the <name> of a garden'), (2,\n gherkin.TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_TEXT,\n 'When I plant a tree'), (3, gherkin.TOKEN_NEWLINE, '\\n'), (4,\n gherkin.TOKEN_TEXT, 'And wait for <num_days> days'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_TEXT,\n 'Then I see it growing'), (5, gherkin.TOKEN_NEWLINE, '\\n'), (6,\n gherkin.TOKEN_LABEL, 'Examples'), (6, gherkin.TOKEN_NEWLINE, '\\n'),\n (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.\n TOKEN_TABLE_COLUMN, 'num_days'), (7, gherkin.TOKEN_NEWLINE, '\\n'),\n (8, gherkin.TOKEN_TABLE_COLUMN, 'Secret'), (8, gherkin.\n TOKEN_TABLE_COLUMN, '2'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9,\n gherkin.TOKEN_TABLE_COLUMN, 'Octopus'), (9, gherkin.\n TOKEN_TABLE_COLUMN, '5'), (9, gherkin.TOKEN_NEWLINE, '\\n'), (10,\n gherkin.TOKEN_EOF, '')])\n scenarios = parser.parse_scenarios()\n scenarios.should.equal([Ast.ScenarioOutline(line=1, title=Ast.Text(line\n =1, text='Plant a tree'), steps=[Ast.Step(line=2, title=Ast.Text(\n line=2, text='Given the <name> of a garden')), Ast.Step(line=3,\n title=Ast.Text(line=3, text='When I plant a tree')), Ast.Step(line=\n 4, title=Ast.Text(line=4, text='And wait for <num_days> days')),\n Ast.Step(line=5, title=Ast.Text(line=5, text=\n 'Then I see it growing'))], examples=Ast.Examples(line=6, table=Ast\n .Table(line=7, fields=[['name', 'num_days'], ['Secret', '2'], [\n 'Octopus', '5']])))])\n\n\n<mask token>\n\n\ndef test_parse_feature_two_backgrounds():\n parser = gherkin.Parser(gherkin.Lexer(\n \"\"\"\nFeature: Feature title\n feature description\n Background: Some background\n about the problem\n Background: Some other background\n will raise an exception\n Scenario: Scenario title\n Given first step\n When second step\n Then third step\n \"\"\"\n ).run())\n parser.parse_feature.when.called.should.throw(SyntaxError,\n \"`Background' should not be declared here, Scenario or Scenario Outline expected\"\n )\n\n\n<mask token>\n\n\ndef test_parse_feature():\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'Feature title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'feature description'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL, 'Background'), (3,\n gherkin.TOKEN_TEXT, 'Some background'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TEXT, 'Given the problem'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5,\n gherkin.TOKEN_TEXT, 'Scenario title'), (5, gherkin.TOKEN_NEWLINE,\n '\\n'), (6, gherkin.TOKEN_TEXT, 'Given first 
step'), (6, gherkin.\n TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_LABEL, 'Scenario'), (7,\n gherkin.TOKEN_TEXT, 'Another scenario'), (7, gherkin.TOKEN_NEWLINE,\n '\\n'), (8, gherkin.TOKEN_TEXT, 'Given this step'), (8, gherkin.\n TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_TEXT,\n 'When we take another step'), (9, gherkin.TOKEN_NEWLINE, '\\n'), (10,\n gherkin.TOKEN_EOF, '')])\n feature = parser.parse_feature()\n feature.should.equal(Ast.Feature(line=1, title=Ast.Text(line=1, text=\n 'Feature title'), description=Ast.Text(line=2, text=\n 'feature description'), background=Ast.Background(line=3, title=Ast\n .Text(line=3, text='Some background'), steps=[Ast.Step(line=4,\n title=Ast.Text(line=4, text='Given the problem'))]), scenarios=[Ast\n .Scenario(line=5, title=Ast.Text(line=5, text='Scenario title'),\n steps=[Ast.Step(line=6, title=Ast.Text(line=6, text=\n 'Given first step'))]), Ast.Scenario(line=7, title=Ast.Text(line=7,\n text='Another scenario'), steps=[Ast.Step(line=8, title=Ast.Text(\n line=8, text='Given this step')), Ast.Step(line=9, title=Ast.Text(\n line=9, text='When we take another step'))])]))\n\n\ndef test_parse_tables_within_steps():\n \"\"\"Lexer.run() Should be able to parse example tables from steps\"\"\"\n \"\"\"Feature: Check models existence\n\t\tBackground:\n\t Given I have a garden in the database:\n\t | @name | area | raining |\n\t | Secret Garden | 45 | false |\n\t And I have gardens in the database:\n\t | name | area | raining |\n\t | Octopus' Garden | 120 | true |\n Scenario: Plant a tree\n Given the <name> of a garden\n When I plant a tree\n And wait for <num_days> days\n Then I see it growing\n \"\"\"\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'Check models existence'), (1, gherkin.TOKEN_NEWLINE,\n '\\n'), (2, gherkin.TOKEN_LABEL, 'Background'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL,\n 'Given I have a garden in the database'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TABLE_COLUMN, '@name'), (4, gherkin.\n TOKEN_TABLE_COLUMN, 'area'), (4, gherkin.TOKEN_TABLE_COLUMN,\n 'raining'), (4, gherkin.TOKEN_NEWLINE, '\\n'), (5, gherkin.\n TOKEN_TABLE_COLUMN, 'Secret Garden'), (5, gherkin.\n TOKEN_TABLE_COLUMN, '45'), (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_LABEL,\n 'And I have gardens in the database'), (6, gherkin.TOKEN_NEWLINE,\n '\\n'), (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.\n TOKEN_TABLE_COLUMN, 'area'), (7, gherkin.TOKEN_TABLE_COLUMN,\n 'raining'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.\n TOKEN_TABLE_COLUMN, \"Octopus' Garden\"), (8, gherkin.\n TOKEN_TABLE_COLUMN, '120'), (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_LABEL,\n 'Scenario'), (9, gherkin.TOKEN_TEXT, 'Plant a tree'), (9, gherkin.\n TOKEN_NEWLINE, '\\n'), (10, gherkin.TOKEN_TEXT,\n 'Given the <name> of a garden'), (10, gherkin.TOKEN_NEWLINE, '\\n'),\n (11, gherkin.TOKEN_TEXT, 'When I plant a tree'), (11, gherkin.\n TOKEN_NEWLINE, '\\n'), (12, gherkin.TOKEN_TEXT,\n 'And wait for <num_days> days'), (12, gherkin.TOKEN_NEWLINE, '\\n'),\n (13, gherkin.TOKEN_TEXT, 'Then I see it growing'), (13, gherkin.\n TOKEN_NEWLINE, '\\n'), (14, gherkin.TOKEN_EOF, '')])\n feature = parser.parse_feature()\n feature.should.equal(Ast.Feature(line=1, title=Ast.Text(line=1, text=\n 'Check models existence'), background=Ast.Background(line=2, steps=\n [Ast.Step(line=3, title=Ast.Text(line=3, text=\n 'Given I have a garden in the 
database'), table=Ast.Table(line=4,\n fields=[['@name', 'area', 'raining'], ['Secret Garden', '45',\n 'false']])), Ast.Step(line=6, title=Ast.Text(line=6, text=\n 'And I have gardens in the database'), table=Ast.Table(line=7,\n fields=[['name', 'area', 'raining'], [\"Octopus' Garden\", '120',\n 'true']]))]), scenarios=[Ast.Scenario(title=Ast.Text(line=9, text=\n 'Plant a tree'), line=9, steps=[Ast.Step(line=10, title=Ast.Text(\n line=10, text='Given the <name> of a garden')), Ast.Step(line=11,\n title=Ast.Text(line=11, text='When I plant a tree')), Ast.Step(line\n =12, title=Ast.Text(line=12, text='And wait for <num_days> days')),\n Ast.Step(line=13, title=Ast.Text(line=13, text=\n 'Then I see it growing'))])]))\n\n\n<mask token>\n\n\ndef test_parse_text():\n parser = Parser([(1, gherkin.TOKEN_TAG, 'tag1'), (1, gherkin.TOKEN_TAG,\n 'tag2'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_TAG,\n 'tag3'), (2, gherkin.TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL,\n 'Feature')])\n tags = parser.parse_tags()\n tags.should.equal(['tag1', 'tag2', 'tag3'])\n\n\ndef test_parse_tags_on_scenario_outline_examples():\n \"\"\"Parser should allow tags to be defined in examples\"\"\"\n parser = Parser([(1, gherkin.TOKEN_TAG, 'tagged-feature'), (1, gherkin.\n TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_LABEL, 'Feature'), (2,\n gherkin.TOKEN_TEXT, 'Parse tags'), (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_TAG, 'tag1'), (3, gherkin.TOKEN_TAG, 'tag2'), (3,\n gherkin.TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_LABEL,\n 'Scenario Outline'), (4, gherkin.TOKEN_TEXT, 'Test'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_TAG, 'example-tag1'), (5,\n gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_TAG, 'example-tag2'\n ), (6, gherkin.TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_LABEL,\n 'Examples'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.\n TOKEN_TABLE_COLUMN, 'Header'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9,\n gherkin.TOKEN_EOF, '')])\n feature = parser.parse_feature()\n feature.should.equal(Ast.Feature(line=2, title=Ast.Text(line=2, text=\n 'Parse tags'), tags=['tagged-feature'], scenarios=[Ast.\n ScenarioOutline(line=4, title=Ast.Text(line=4, text='Test'), tags=[\n 'tag1', 'tag2'], examples=Ast.Examples(line=7, tags=['example-tag1',\n 'example-tag2'], table=Ast.Table(line=8, fields=[['Header']])))]))\n\n\n<mask token>\n\n\ndef test_ast_node_equal():\n n1 = Ast.Node()\n n2 = Ast.Node()\n n1.name = 'Lincoln'\n n2.color = 'green'\n equal = n1 == n2\n equal.should.be.false\n",
"step-2": "<mask token>\n\n\ndef test_lex_test_eof():\n \"\"\"lex_text() Should be able to find EOF\"\"\"\n lexer = gherkin.Lexer('')\n new_state = lexer.lex_text()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_EOF, '')])\n new_state.should.be.none\n\n\ndef test_lex_text():\n \"\"\"lex_text() Should be able to find text before EOF\"\"\"\n lexer = gherkin.Lexer('some text')\n new_state = lexer.lex_text()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_TEXT, 'some text'), (1,\n gherkin.TOKEN_EOF, '')])\n new_state.should.be.none\n\n\ndef test_lex_hash_with_text():\n \"\"\"lex_text() Should stop lexing at # (we found a comment!)\"\"\"\n lexer = gherkin.Lexer(' some text # random comment')\n new_state = lexer.lex_text()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_TEXT, 'some text ')])\n new_state.should.equal(lexer.lex_comment)\n\n\ndef test_lex_comment():\n \"\"\"lex_comment() Should stop lexing at \\\\n\"\"\"\n lexer = gherkin.Lexer(' random comment')\n new_state = lexer.lex_comment()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_COMMENT, 'random comment')])\n new_state.should.equal(lexer.lex_text)\n\n\ndef test_lex_comment_meta_label():\n \"\"\"lex_comment() Should stop lexing at : (we found a label)\"\"\"\n lexer = gherkin.Lexer(' metadata: test')\n new_state = lexer.lex_comment()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_META_LABEL, 'metadata')])\n new_state.should.equal(lexer.lex_comment_metadata_value)\n\n\n<mask token>\n\n\ndef test_lex_comment_no_newline():\n lexer = gherkin.Lexer(' test comment')\n new_state = lexer.lex_comment_metadata_value()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_META_VALUE, 'test comment')])\n new_state.should.equal(lexer.lex_text)\n\n\ndef test_lex_comment_until_newline():\n \"\"\"Lexer.lex_comment() Should parse comments until the newline character\"\"\"\n lexer = gherkin.Lexer('# one line\\n# another line')\n tokens = lexer.run()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_COMMENT, 'one line'), (1,\n gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_COMMENT,\n 'another line'), (2, gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_comment_full():\n \"\"\"Lexer.run() Should be able to process metadata in comments\"\"\"\n lexer = gherkin.Lexer('some text # metadata-field: blah-value\\ntext')\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_TEXT, 'some text '), (1, gherkin\n .TOKEN_META_LABEL, 'metadata-field'), (1, gherkin.TOKEN_META_VALUE,\n 'blah-value'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.\n TOKEN_TEXT, 'text'), (2, gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_text_with_label():\n \"\"\"Lexer.run() Should be able to parse a label with some text\"\"\"\n lexer = gherkin.Lexer(\n 'Feature: A cool feature\\n some more text\\n even more text')\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'A cool feature'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'some more text'), (2, gherkin.TOKEN_NEWLINE,\n '\\n'), (3, gherkin.TOKEN_TEXT, 'even more text'), (3, gherkin.\n TOKEN_EOF, '')])\n\n\ndef test_lex_text_with_labels():\n \"\"\"Lexer.run() Should be able to tokenize a feature with a scenario\"\"\"\n lexer = gherkin.Lexer(\n \"\"\"\n\nFeature: Some descriptive text\n In order to parse a Gherkin file\n As a parser\n I want to be able to parse scenarios\n\n Even more text\n\n Scenario: The user wants to describe a feature\n\"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, 
gherkin.TOKEN_LABEL, 'Feature'), (3,\n gherkin.TOKEN_TEXT, 'Some descriptive text'), (3, gherkin.\n TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_TEXT,\n 'In order to parse a Gherkin file'), (4, gherkin.TOKEN_NEWLINE,\n '\\n'), (5, gherkin.TOKEN_TEXT, 'As a parser'), (5, gherkin.\n TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_TEXT,\n 'I want to be able to parse scenarios'), (6, gherkin.TOKEN_NEWLINE,\n '\\n'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.TOKEN_TEXT,\n 'Even more text'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.\n TOKEN_NEWLINE, '\\n'), (10, gherkin.TOKEN_LABEL, 'Scenario'), (10,\n gherkin.TOKEN_TEXT, 'The user wants to describe a feature'), (10,\n gherkin.TOKEN_NEWLINE, '\\n'), (11, gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_text_with_steps():\n \"\"\"Lexer.run() Should be able to tokenize steps\"\"\"\n lexer = gherkin.Lexer(\n \"\"\"Feature: Feature title\n feature description\n Background: Some background\n about the problem\n Scenario: Scenario title\n Given first step\n When second step\n Then third step\n\"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'Feature title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'feature description'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL, 'Background'), (3,\n gherkin.TOKEN_TEXT, 'Some background'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TEXT, 'about the problem'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5,\n gherkin.TOKEN_TEXT, 'Scenario title'), (5, gherkin.TOKEN_NEWLINE,\n '\\n'), (6, gherkin.TOKEN_TEXT, 'Given first step'), (6, gherkin.\n TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_TEXT, 'When second step'),\n (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.TOKEN_TEXT,\n 'Then third step'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.\n TOKEN_EOF, '')])\n\n\ndef test_lex_load_languages():\n \"\"\"Lexer.run() Should be able to parse different languages\"\"\"\n lexer = gherkin.Lexer(\n \"\"\"# language: pt-br\n\n Funcionalidade: Interpretador para gherkin\n Para escrever testes de aceitação\n Como um programador\n Preciso de uma ferramenta de BDD\n Contexto:\n Dado que a variavel \"X\" contém o número 2\n Cenário: Lanche\n Dada uma maçã\n Quando mordida\n Então a fome passa\n \"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_META_LABEL, 'language'), (1,\n gherkin.TOKEN_META_VALUE, 'pt-br'), (1, gherkin.TOKEN_NEWLINE, '\\n'\n ), (2, gherkin.TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL,\n 'Funcionalidade'), (3, gherkin.TOKEN_TEXT,\n 'Interpretador para gherkin'), (3, gherkin.TOKEN_NEWLINE, '\\n'), (4,\n gherkin.TOKEN_TEXT, 'Para escrever testes de aceitação'), (4,\n gherkin.TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_TEXT,\n 'Como um programador'), (5, gherkin.TOKEN_NEWLINE, '\\n'), (6,\n gherkin.TOKEN_TEXT, 'Preciso de uma ferramenta de BDD'), (6,\n gherkin.TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_LABEL, 'Contexto'),\n (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.TOKEN_TEXT,\n 'Dado que a variavel \"X\" contém o número 2'), (8, gherkin.\n TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_LABEL, 'Cenário'), (9,\n gherkin.TOKEN_TEXT, 'Lanche'), (9, gherkin.TOKEN_NEWLINE, '\\n'), (\n 10, gherkin.TOKEN_TEXT, 'Dada uma maçã'), (10, gherkin.\n TOKEN_NEWLINE, '\\n'), (11, gherkin.TOKEN_TEXT, 'Quando mordida'), (\n 11, gherkin.TOKEN_NEWLINE, '\\n'), (12, gherkin.TOKEN_TEXT,\n 'Então a fome passa'), (12, gherkin.TOKEN_NEWLINE, '\\n'), (13,\n gherkin.TOKEN_EOF, 
'')])\n\n\ndef test_lex_tables():\n \"\"\"Lexer.run() Should be able to lex tables\"\"\"\n lexer = gherkin.Lexer(\"\"\" Examples:\n | column1 | column2 | \"\"\")\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Examples'), (1, gherkin.\n TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_TABLE_COLUMN, 'column1'), (\n 2, gherkin.TOKEN_TABLE_COLUMN, 'column2'), (2, gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_tables_full():\n \"\"\"Lexer.run() Should be able to lex scenario outlines\"\"\"\n lexer = gherkin.Lexer(\n \"\"\" Feature: gherkin has steps with examples\n Scenario Outline: Add two numbers\n Given I have <input_1> and <input_2> the calculator\n When I press \"Sum\"!\n Then the result should be <output> on the screen\n Examples:\n | input_1 | input_2 | output |\n | 20 | 30 | 50 |\n | 0 | 40 | 40 |\n\"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'gherkin has steps with examples'), (1, gherkin.\n TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_LABEL, 'Scenario Outline'),\n (2, gherkin.TOKEN_TEXT, 'Add two numbers'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_TEXT,\n 'Given I have <input_1> and <input_2> the calculator'), (3, gherkin\n .TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_TEXT,\n 'When I press \"Sum\"!'), (4, gherkin.TOKEN_NEWLINE, '\\n'), (5,\n gherkin.TOKEN_TEXT,\n 'Then the result should be <output> on the screen'), (5, gherkin.\n TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_LABEL, 'Examples'), (6,\n gherkin.TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_TABLE_COLUMN,\n 'input_1'), (7, gherkin.TOKEN_TABLE_COLUMN, 'input_2'), (7, gherkin\n .TOKEN_TABLE_COLUMN, 'output'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (\n 8, gherkin.TOKEN_TABLE_COLUMN, '20'), (8, gherkin.\n TOKEN_TABLE_COLUMN, '30'), (8, gherkin.TOKEN_TABLE_COLUMN, '50'), (\n 8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_TABLE_COLUMN,\n '0'), (9, gherkin.TOKEN_TABLE_COLUMN, '40'), (9, gherkin.\n TOKEN_TABLE_COLUMN, '40'), (9, gherkin.TOKEN_NEWLINE, '\\n'), (10,\n gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_tables_within_steps():\n \"\"\"Lexer.run() Should be able to lex example tables from steps\"\"\"\n lexer = gherkin.Lexer(\n \"\"\"\tFeature: Check models existence\n\t\tBackground:\n\t Given I have a garden in the database:\n\t | @name | area | raining |\n\t | Secret Garden | 45 | false |\n\t And I have gardens in the database:\n\t | name | area | raining |\n\t | Octopus' Garden | 120 | true |\n \"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'Check models existence'), (1, gherkin.TOKEN_NEWLINE,\n '\\n'), (2, gherkin.TOKEN_LABEL, 'Background'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL,\n 'Given I have a garden in the database'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TABLE_COLUMN, '@name'), (4, gherkin.\n TOKEN_TABLE_COLUMN, 'area'), (4, gherkin.TOKEN_TABLE_COLUMN,\n 'raining'), (4, gherkin.TOKEN_NEWLINE, '\\n'), (5, gherkin.\n TOKEN_TABLE_COLUMN, 'Secret Garden'), (5, gherkin.\n TOKEN_TABLE_COLUMN, '45'), (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_LABEL,\n 'And I have gardens in the database'), (6, gherkin.TOKEN_NEWLINE,\n '\\n'), (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.\n TOKEN_TABLE_COLUMN, 'area'), (7, gherkin.TOKEN_TABLE_COLUMN,\n 'raining'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.\n TOKEN_TABLE_COLUMN, \"Octopus' Garden\"), (8, gherkin.\n 
TOKEN_TABLE_COLUMN, '120'), (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_multi_line_str():\n \"\"\"Lexer.run() Should be able to find multi quoted strings after labels\"\"\"\n lexer = gherkin.Lexer(\n \"\"\" Given the following email template:\n '''Here we go with a pretty\n big block of text\n surrounded by triple quoted strings\n '''\n And a cat picture\n \"\"\\\"Now notice we didn't use (:) above\n \"\"\\\"\n \"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL,\n 'Given the following email template'), (1, gherkin.TOKEN_NEWLINE,\n '\\n'), (2, gherkin.TOKEN_QUOTES, \"'''\"), (2, gherkin.TOKEN_TEXT,\n \"\"\"Here we go with a pretty\n big block of text\n surrounded by triple quoted strings\n \"\"\"\n ), (5, gherkin.TOKEN_QUOTES, \"'''\"), (5, gherkin.TOKEN_NEWLINE,\n '\\n'), (6, gherkin.TOKEN_TEXT, 'And a cat picture'), (6, gherkin.\n TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_QUOTES, '\"\"\"'), (7, gherkin\n .TOKEN_TEXT, \"\"\"Now notice we didn't use (:) above\n \"\"\"), (8,\n gherkin.TOKEN_QUOTES, '\"\"\"'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9,\n gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_tags_empty():\n \"\"\"Lexer.lex_tag() Should bail if we reach EOF\"\"\"\n lexer = gherkin.Lexer('')\n lexer.lex_tag()\n lexer.tokens.should.be.empty\n\n\ndef test_lex_tags():\n \"\"\"Lexer.run() Should be able to find tags\"\"\"\n lexer = gherkin.Lexer(\n \"\"\" @tagged-feature\n Feature: Parse tags\n\n @tag1 @tag2\n Scenario: Test\n \"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_TAG, 'tagged-feature'), (1,\n gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_LABEL, 'Feature'),\n (2, gherkin.TOKEN_TEXT, 'Parse tags'), (2, gherkin.TOKEN_NEWLINE,\n '\\n'), (3, gherkin.TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_TAG,\n 'tag1'), (4, gherkin.TOKEN_TAG, 'tag2'), (4, gherkin.TOKEN_NEWLINE,\n '\\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5, gherkin.TOKEN_TEXT,\n 'Test'), (5, gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_EOF, '')])\n\n\ndef test_parse_metadata_empty():\n Parser([(1, gherkin.TOKEN_EOF, '')]).parse_metadata().should.be.none\n Parser([None]).parse_metadata().should.be.none\n\n\ndef test_parse_metadata_incomplete():\n parser = Parser([(1, gherkin.TOKEN_META_LABEL, 'language'), (1, gherkin\n .TOKEN_EOF, '')])\n parser.parse_metadata().should.be.none\n\n\ndef test_parse_metadata_syntax_error():\n parser = Parser([(1, gherkin.TOKEN_META_LABEL, 'language'), (1, gherkin\n .TOKEN_TEXT, 'pt-br')])\n parser.parse_metadata.when.called.should.throw(SyntaxError,\n \"No value found for the meta-field `language'\")\n\n\n<mask token>\n\n\ndef test_parse_empty_title():\n parser = Parser([(1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.\n TOKEN_TEXT, 'more text after title')])\n feature = parser.parse_title()\n feature.should.be.none\n\n\ndef test_parse_title():\n parser = Parser([(1, gherkin.TOKEN_TEXT, 'Scenario title'), (1, gherkin\n .TOKEN_NEWLINE, '\\n')])\n feature = parser.parse_title()\n feature.should.equal(Ast.Text(line=1, text='Scenario title'))\n\n\ndef test_parse_table():\n parser = Parser([(1, gherkin.TOKEN_TABLE_COLUMN, 'name'), (1, gherkin.\n TOKEN_TABLE_COLUMN, 'email'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'), (2, gherkin.\n TOKEN_TABLE_COLUMN, '[email protected]'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'), (\n 3, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'), (3,\n 
gherkin.TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_LABEL, 'Scenario'),\n (4, gherkin.TOKEN_EOF, '')])\n feature = parser.parse_table()\n feature.should.equal(Ast.Table(line=1, fields=[['name', 'email'], [\n 'Lincoln', '[email protected]'], ['Gabriel',\n '[email protected]']]))\n\n\ndef test_parse_background():\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Background'), (1, gherkin.\n TOKEN_TEXT, 'title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin\n .TOKEN_LABEL, 'Given two users in the database'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_TABLE_COLUMN, 'name'), (3,\n gherkin.TOKEN_TABLE_COLUMN, 'email'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'), (4, gherkin.\n TOKEN_TABLE_COLUMN, '[email protected]'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'), (\n 5, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'), (5,\n gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_LABEL, 'Scenario')])\n feature = parser.parse_background()\n feature.should.equal(Ast.Background(line=1, title=Ast.Text(line=1, text\n ='title'), steps=[Ast.Step(line=2, title=Ast.Text(line=2, text=\n 'Given two users in the database'), table=Ast.Table(line=3, fields=\n [['name', 'email'], ['Lincoln', '[email protected]'], ['Gabriel',\n '[email protected]']]))]))\n\n\ndef teste_parse_scenario():\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario'), (1, gherkin.\n TOKEN_TEXT, 'Scenario title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'Given first step')])\n feature = parser.parse_scenarios()\n feature.should.equal([Ast.Scenario(line=1, title=Ast.Text(line=1, text=\n 'Scenario title'), steps=[Ast.Step(line=2, title=Ast.Text(line=2,\n text='Given first step'))])])\n\n\ndef teste_parse_scenario_with_description():\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario'), (1, gherkin.\n TOKEN_TEXT, 'Scenario title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'Scenario description'), (2, gherkin.TOKEN_TEXT,\n 'More description'), (2, gherkin.TOKEN_NEWLINE, '\\n'), (3, gherkin.\n TOKEN_TEXT, 'Given first step')])\n feature = parser.parse_scenarios()\n feature.should.equal([Ast.Scenario(line=1, title=Ast.Text(line=1, text=\n 'Scenario title'), description=Ast.Text(line=2, text=\n 'Scenario description More description'), steps=[Ast.Step(line=3,\n title=Ast.Text(line=3, text='Given first step'))])])\n\n\ndef test_parse_scenario_outline_with_examples():\n \"\"\"\"\"\"\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario Outline'), (1,\n gherkin.TOKEN_TEXT, 'Plant a tree'), (1, gherkin.TOKEN_NEWLINE,\n '\\n'), (2, gherkin.TOKEN_TEXT, 'Given the <name> of a garden'), (2,\n gherkin.TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_TEXT,\n 'When I plant a tree'), (3, gherkin.TOKEN_NEWLINE, '\\n'), (4,\n gherkin.TOKEN_TEXT, 'And wait for <num_days> days'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_TEXT,\n 'Then I see it growing'), (5, gherkin.TOKEN_NEWLINE, '\\n'), (6,\n gherkin.TOKEN_LABEL, 'Examples'), (6, gherkin.TOKEN_NEWLINE, '\\n'),\n (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.\n TOKEN_TABLE_COLUMN, 'num_days'), (7, gherkin.TOKEN_NEWLINE, '\\n'),\n (8, gherkin.TOKEN_TABLE_COLUMN, 'Secret'), (8, gherkin.\n TOKEN_TABLE_COLUMN, '2'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9,\n gherkin.TOKEN_TABLE_COLUMN, 'Octopus'), (9, gherkin.\n TOKEN_TABLE_COLUMN, '5'), (9, gherkin.TOKEN_NEWLINE, '\\n'), (10,\n gherkin.TOKEN_EOF, '')])\n scenarios = parser.parse_scenarios()\n 
scenarios.should.equal([Ast.ScenarioOutline(line=1, title=Ast.Text(line\n =1, text='Plant a tree'), steps=[Ast.Step(line=2, title=Ast.Text(\n line=2, text='Given the <name> of a garden')), Ast.Step(line=3,\n title=Ast.Text(line=3, text='When I plant a tree')), Ast.Step(line=\n 4, title=Ast.Text(line=4, text='And wait for <num_days> days')),\n Ast.Step(line=5, title=Ast.Text(line=5, text=\n 'Then I see it growing'))], examples=Ast.Examples(line=6, table=Ast\n .Table(line=7, fields=[['name', 'num_days'], ['Secret', '2'], [\n 'Octopus', '5']])))])\n\n\n<mask token>\n\n\ndef test_parse_feature_two_backgrounds():\n parser = gherkin.Parser(gherkin.Lexer(\n \"\"\"\nFeature: Feature title\n feature description\n Background: Some background\n about the problem\n Background: Some other background\n will raise an exception\n Scenario: Scenario title\n Given first step\n When second step\n Then third step\n \"\"\"\n ).run())\n parser.parse_feature.when.called.should.throw(SyntaxError,\n \"`Background' should not be declared here, Scenario or Scenario Outline expected\"\n )\n\n\ndef test_parse_feature_background_wrong_place():\n parser = gherkin.Parser(gherkin.Lexer(\n \"\"\"\nFeature: Feature title\n feature description\n Scenario: Scenario title\n Given first step\n When second step\n Then third step\n Background: Some background\n about the problem\n \"\"\"\n ).run())\n parser.parse_feature.when.called.should.throw(SyntaxError,\n \"`Background' should not be declared here, Scenario or Scenario Outline expected\"\n )\n\n\ndef test_parse_feature():\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'Feature title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'feature description'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL, 'Background'), (3,\n gherkin.TOKEN_TEXT, 'Some background'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TEXT, 'Given the problem'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5,\n gherkin.TOKEN_TEXT, 'Scenario title'), (5, gherkin.TOKEN_NEWLINE,\n '\\n'), (6, gherkin.TOKEN_TEXT, 'Given first step'), (6, gherkin.\n TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_LABEL, 'Scenario'), (7,\n gherkin.TOKEN_TEXT, 'Another scenario'), (7, gherkin.TOKEN_NEWLINE,\n '\\n'), (8, gherkin.TOKEN_TEXT, 'Given this step'), (8, gherkin.\n TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_TEXT,\n 'When we take another step'), (9, gherkin.TOKEN_NEWLINE, '\\n'), (10,\n gherkin.TOKEN_EOF, '')])\n feature = parser.parse_feature()\n feature.should.equal(Ast.Feature(line=1, title=Ast.Text(line=1, text=\n 'Feature title'), description=Ast.Text(line=2, text=\n 'feature description'), background=Ast.Background(line=3, title=Ast\n .Text(line=3, text='Some background'), steps=[Ast.Step(line=4,\n title=Ast.Text(line=4, text='Given the problem'))]), scenarios=[Ast\n .Scenario(line=5, title=Ast.Text(line=5, text='Scenario title'),\n steps=[Ast.Step(line=6, title=Ast.Text(line=6, text=\n 'Given first step'))]), Ast.Scenario(line=7, title=Ast.Text(line=7,\n text='Another scenario'), steps=[Ast.Step(line=8, title=Ast.Text(\n line=8, text='Given this step')), Ast.Step(line=9, title=Ast.Text(\n line=9, text='When we take another step'))])]))\n\n\ndef test_parse_tables_within_steps():\n \"\"\"Lexer.run() Should be able to parse example tables from steps\"\"\"\n \"\"\"Feature: Check models existence\n\t\tBackground:\n\t Given I have a garden in the database:\n\t | @name | area | raining |\n\t | Secret Garden | 45 | false 
|\n\t And I have gardens in the database:\n\t | name | area | raining |\n\t | Octopus' Garden | 120 | true |\n Scenario: Plant a tree\n Given the <name> of a garden\n When I plant a tree\n And wait for <num_days> days\n Then I see it growing\n \"\"\"\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'Check models existence'), (1, gherkin.TOKEN_NEWLINE,\n '\\n'), (2, gherkin.TOKEN_LABEL, 'Background'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL,\n 'Given I have a garden in the database'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TABLE_COLUMN, '@name'), (4, gherkin.\n TOKEN_TABLE_COLUMN, 'area'), (4, gherkin.TOKEN_TABLE_COLUMN,\n 'raining'), (4, gherkin.TOKEN_NEWLINE, '\\n'), (5, gherkin.\n TOKEN_TABLE_COLUMN, 'Secret Garden'), (5, gherkin.\n TOKEN_TABLE_COLUMN, '45'), (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_LABEL,\n 'And I have gardens in the database'), (6, gherkin.TOKEN_NEWLINE,\n '\\n'), (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.\n TOKEN_TABLE_COLUMN, 'area'), (7, gherkin.TOKEN_TABLE_COLUMN,\n 'raining'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.\n TOKEN_TABLE_COLUMN, \"Octopus' Garden\"), (8, gherkin.\n TOKEN_TABLE_COLUMN, '120'), (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_LABEL,\n 'Scenario'), (9, gherkin.TOKEN_TEXT, 'Plant a tree'), (9, gherkin.\n TOKEN_NEWLINE, '\\n'), (10, gherkin.TOKEN_TEXT,\n 'Given the <name> of a garden'), (10, gherkin.TOKEN_NEWLINE, '\\n'),\n (11, gherkin.TOKEN_TEXT, 'When I plant a tree'), (11, gherkin.\n TOKEN_NEWLINE, '\\n'), (12, gherkin.TOKEN_TEXT,\n 'And wait for <num_days> days'), (12, gherkin.TOKEN_NEWLINE, '\\n'),\n (13, gherkin.TOKEN_TEXT, 'Then I see it growing'), (13, gherkin.\n TOKEN_NEWLINE, '\\n'), (14, gherkin.TOKEN_EOF, '')])\n feature = parser.parse_feature()\n feature.should.equal(Ast.Feature(line=1, title=Ast.Text(line=1, text=\n 'Check models existence'), background=Ast.Background(line=2, steps=\n [Ast.Step(line=3, title=Ast.Text(line=3, text=\n 'Given I have a garden in the database'), table=Ast.Table(line=4,\n fields=[['@name', 'area', 'raining'], ['Secret Garden', '45',\n 'false']])), Ast.Step(line=6, title=Ast.Text(line=6, text=\n 'And I have gardens in the database'), table=Ast.Table(line=7,\n fields=[['name', 'area', 'raining'], [\"Octopus' Garden\", '120',\n 'true']]))]), scenarios=[Ast.Scenario(title=Ast.Text(line=9, text=\n 'Plant a tree'), line=9, steps=[Ast.Step(line=10, title=Ast.Text(\n line=10, text='Given the <name> of a garden')), Ast.Step(line=11,\n title=Ast.Text(line=11, text='When I plant a tree')), Ast.Step(line\n =12, title=Ast.Text(line=12, text='And wait for <num_days> days')),\n Ast.Step(line=13, title=Ast.Text(line=13, text=\n 'Then I see it growing'))])]))\n\n\n<mask token>\n\n\ndef test_parse_text():\n parser = Parser([(1, gherkin.TOKEN_TAG, 'tag1'), (1, gherkin.TOKEN_TAG,\n 'tag2'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_TAG,\n 'tag3'), (2, gherkin.TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL,\n 'Feature')])\n tags = parser.parse_tags()\n tags.should.equal(['tag1', 'tag2', 'tag3'])\n\n\ndef test_parse_tags_on_scenario_outline_examples():\n \"\"\"Parser should allow tags to be defined in examples\"\"\"\n parser = Parser([(1, gherkin.TOKEN_TAG, 'tagged-feature'), (1, gherkin.\n TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_LABEL, 'Feature'), (2,\n gherkin.TOKEN_TEXT, 'Parse tags'), (2, gherkin.TOKEN_NEWLINE, 
'\\n'),\n (3, gherkin.TOKEN_TAG, 'tag1'), (3, gherkin.TOKEN_TAG, 'tag2'), (3,\n gherkin.TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_LABEL,\n 'Scenario Outline'), (4, gherkin.TOKEN_TEXT, 'Test'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_TAG, 'example-tag1'), (5,\n gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_TAG, 'example-tag2'\n ), (6, gherkin.TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_LABEL,\n 'Examples'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.\n TOKEN_TABLE_COLUMN, 'Header'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9,\n gherkin.TOKEN_EOF, '')])\n feature = parser.parse_feature()\n feature.should.equal(Ast.Feature(line=2, title=Ast.Text(line=2, text=\n 'Parse tags'), tags=['tagged-feature'], scenarios=[Ast.\n ScenarioOutline(line=4, title=Ast.Text(line=4, text='Test'), tags=[\n 'tag1', 'tag2'], examples=Ast.Examples(line=7, tags=['example-tag1',\n 'example-tag2'], table=Ast.Table(line=8, fields=[['Header']])))]))\n\n\n<mask token>\n\n\ndef test_ast_node_equal():\n n1 = Ast.Node()\n n2 = Ast.Node()\n n1.name = 'Lincoln'\n n2.color = 'green'\n equal = n1 == n2\n equal.should.be.false\n",
"step-3": "<mask token>\n\n\ndef test_lex_test_eof():\n \"\"\"lex_text() Should be able to find EOF\"\"\"\n lexer = gherkin.Lexer('')\n new_state = lexer.lex_text()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_EOF, '')])\n new_state.should.be.none\n\n\ndef test_lex_text():\n \"\"\"lex_text() Should be able to find text before EOF\"\"\"\n lexer = gherkin.Lexer('some text')\n new_state = lexer.lex_text()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_TEXT, 'some text'), (1,\n gherkin.TOKEN_EOF, '')])\n new_state.should.be.none\n\n\ndef test_lex_hash_with_text():\n \"\"\"lex_text() Should stop lexing at # (we found a comment!)\"\"\"\n lexer = gherkin.Lexer(' some text # random comment')\n new_state = lexer.lex_text()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_TEXT, 'some text ')])\n new_state.should.equal(lexer.lex_comment)\n\n\ndef test_lex_comment():\n \"\"\"lex_comment() Should stop lexing at \\\\n\"\"\"\n lexer = gherkin.Lexer(' random comment')\n new_state = lexer.lex_comment()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_COMMENT, 'random comment')])\n new_state.should.equal(lexer.lex_text)\n\n\ndef test_lex_comment_meta_label():\n \"\"\"lex_comment() Should stop lexing at : (we found a label)\"\"\"\n lexer = gherkin.Lexer(' metadata: test')\n new_state = lexer.lex_comment()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_META_LABEL, 'metadata')])\n new_state.should.equal(lexer.lex_comment_metadata_value)\n\n\n<mask token>\n\n\ndef test_lex_comment_no_newline():\n lexer = gherkin.Lexer(' test comment')\n new_state = lexer.lex_comment_metadata_value()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_META_VALUE, 'test comment')])\n new_state.should.equal(lexer.lex_text)\n\n\ndef test_lex_comment_until_newline():\n \"\"\"Lexer.lex_comment() Should parse comments until the newline character\"\"\"\n lexer = gherkin.Lexer('# one line\\n# another line')\n tokens = lexer.run()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_COMMENT, 'one line'), (1,\n gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_COMMENT,\n 'another line'), (2, gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_comment_full():\n \"\"\"Lexer.run() Should be able to process metadata in comments\"\"\"\n lexer = gherkin.Lexer('some text # metadata-field: blah-value\\ntext')\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_TEXT, 'some text '), (1, gherkin\n .TOKEN_META_LABEL, 'metadata-field'), (1, gherkin.TOKEN_META_VALUE,\n 'blah-value'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.\n TOKEN_TEXT, 'text'), (2, gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_text_with_label():\n \"\"\"Lexer.run() Should be able to parse a label with some text\"\"\"\n lexer = gherkin.Lexer(\n 'Feature: A cool feature\\n some more text\\n even more text')\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'A cool feature'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'some more text'), (2, gherkin.TOKEN_NEWLINE,\n '\\n'), (3, gherkin.TOKEN_TEXT, 'even more text'), (3, gherkin.\n TOKEN_EOF, '')])\n\n\ndef test_lex_text_with_labels():\n \"\"\"Lexer.run() Should be able to tokenize a feature with a scenario\"\"\"\n lexer = gherkin.Lexer(\n \"\"\"\n\nFeature: Some descriptive text\n In order to parse a Gherkin file\n As a parser\n I want to be able to parse scenarios\n\n Even more text\n\n Scenario: The user wants to describe a feature\n\"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, 
gherkin.TOKEN_LABEL, 'Feature'), (3,\n gherkin.TOKEN_TEXT, 'Some descriptive text'), (3, gherkin.\n TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_TEXT,\n 'In order to parse a Gherkin file'), (4, gherkin.TOKEN_NEWLINE,\n '\\n'), (5, gherkin.TOKEN_TEXT, 'As a parser'), (5, gherkin.\n TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_TEXT,\n 'I want to be able to parse scenarios'), (6, gherkin.TOKEN_NEWLINE,\n '\\n'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.TOKEN_TEXT,\n 'Even more text'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.\n TOKEN_NEWLINE, '\\n'), (10, gherkin.TOKEN_LABEL, 'Scenario'), (10,\n gherkin.TOKEN_TEXT, 'The user wants to describe a feature'), (10,\n gherkin.TOKEN_NEWLINE, '\\n'), (11, gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_text_with_steps():\n \"\"\"Lexer.run() Should be able to tokenize steps\"\"\"\n lexer = gherkin.Lexer(\n \"\"\"Feature: Feature title\n feature description\n Background: Some background\n about the problem\n Scenario: Scenario title\n Given first step\n When second step\n Then third step\n\"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'Feature title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'feature description'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL, 'Background'), (3,\n gherkin.TOKEN_TEXT, 'Some background'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TEXT, 'about the problem'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5,\n gherkin.TOKEN_TEXT, 'Scenario title'), (5, gherkin.TOKEN_NEWLINE,\n '\\n'), (6, gherkin.TOKEN_TEXT, 'Given first step'), (6, gherkin.\n TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_TEXT, 'When second step'),\n (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.TOKEN_TEXT,\n 'Then third step'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.\n TOKEN_EOF, '')])\n\n\ndef test_lex_load_languages():\n \"\"\"Lexer.run() Should be able to parse different languages\"\"\"\n lexer = gherkin.Lexer(\n \"\"\"# language: pt-br\n\n Funcionalidade: Interpretador para gherkin\n Para escrever testes de aceitação\n Como um programador\n Preciso de uma ferramenta de BDD\n Contexto:\n Dado que a variavel \"X\" contém o número 2\n Cenário: Lanche\n Dada uma maçã\n Quando mordida\n Então a fome passa\n \"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_META_LABEL, 'language'), (1,\n gherkin.TOKEN_META_VALUE, 'pt-br'), (1, gherkin.TOKEN_NEWLINE, '\\n'\n ), (2, gherkin.TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL,\n 'Funcionalidade'), (3, gherkin.TOKEN_TEXT,\n 'Interpretador para gherkin'), (3, gherkin.TOKEN_NEWLINE, '\\n'), (4,\n gherkin.TOKEN_TEXT, 'Para escrever testes de aceitação'), (4,\n gherkin.TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_TEXT,\n 'Como um programador'), (5, gherkin.TOKEN_NEWLINE, '\\n'), (6,\n gherkin.TOKEN_TEXT, 'Preciso de uma ferramenta de BDD'), (6,\n gherkin.TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_LABEL, 'Contexto'),\n (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.TOKEN_TEXT,\n 'Dado que a variavel \"X\" contém o número 2'), (8, gherkin.\n TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_LABEL, 'Cenário'), (9,\n gherkin.TOKEN_TEXT, 'Lanche'), (9, gherkin.TOKEN_NEWLINE, '\\n'), (\n 10, gherkin.TOKEN_TEXT, 'Dada uma maçã'), (10, gherkin.\n TOKEN_NEWLINE, '\\n'), (11, gherkin.TOKEN_TEXT, 'Quando mordida'), (\n 11, gherkin.TOKEN_NEWLINE, '\\n'), (12, gherkin.TOKEN_TEXT,\n 'Então a fome passa'), (12, gherkin.TOKEN_NEWLINE, '\\n'), (13,\n gherkin.TOKEN_EOF, 
'')])\n\n\ndef test_lex_tables():\n \"\"\"Lexer.run() Should be able to lex tables\"\"\"\n lexer = gherkin.Lexer(\"\"\" Examples:\n | column1 | column2 | \"\"\")\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Examples'), (1, gherkin.\n TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_TABLE_COLUMN, 'column1'), (\n 2, gherkin.TOKEN_TABLE_COLUMN, 'column2'), (2, gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_tables_full():\n \"\"\"Lexer.run() Should be able to lex scenario outlines\"\"\"\n lexer = gherkin.Lexer(\n \"\"\" Feature: gherkin has steps with examples\n Scenario Outline: Add two numbers\n Given I have <input_1> and <input_2> the calculator\n When I press \"Sum\"!\n Then the result should be <output> on the screen\n Examples:\n | input_1 | input_2 | output |\n | 20 | 30 | 50 |\n | 0 | 40 | 40 |\n\"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'gherkin has steps with examples'), (1, gherkin.\n TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_LABEL, 'Scenario Outline'),\n (2, gherkin.TOKEN_TEXT, 'Add two numbers'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_TEXT,\n 'Given I have <input_1> and <input_2> the calculator'), (3, gherkin\n .TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_TEXT,\n 'When I press \"Sum\"!'), (4, gherkin.TOKEN_NEWLINE, '\\n'), (5,\n gherkin.TOKEN_TEXT,\n 'Then the result should be <output> on the screen'), (5, gherkin.\n TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_LABEL, 'Examples'), (6,\n gherkin.TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_TABLE_COLUMN,\n 'input_1'), (7, gherkin.TOKEN_TABLE_COLUMN, 'input_2'), (7, gherkin\n .TOKEN_TABLE_COLUMN, 'output'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (\n 8, gherkin.TOKEN_TABLE_COLUMN, '20'), (8, gherkin.\n TOKEN_TABLE_COLUMN, '30'), (8, gherkin.TOKEN_TABLE_COLUMN, '50'), (\n 8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_TABLE_COLUMN,\n '0'), (9, gherkin.TOKEN_TABLE_COLUMN, '40'), (9, gherkin.\n TOKEN_TABLE_COLUMN, '40'), (9, gherkin.TOKEN_NEWLINE, '\\n'), (10,\n gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_tables_within_steps():\n \"\"\"Lexer.run() Should be able to lex example tables from steps\"\"\"\n lexer = gherkin.Lexer(\n \"\"\"\tFeature: Check models existence\n\t\tBackground:\n\t Given I have a garden in the database:\n\t | @name | area | raining |\n\t | Secret Garden | 45 | false |\n\t And I have gardens in the database:\n\t | name | area | raining |\n\t | Octopus' Garden | 120 | true |\n \"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'Check models existence'), (1, gherkin.TOKEN_NEWLINE,\n '\\n'), (2, gherkin.TOKEN_LABEL, 'Background'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL,\n 'Given I have a garden in the database'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TABLE_COLUMN, '@name'), (4, gherkin.\n TOKEN_TABLE_COLUMN, 'area'), (4, gherkin.TOKEN_TABLE_COLUMN,\n 'raining'), (4, gherkin.TOKEN_NEWLINE, '\\n'), (5, gherkin.\n TOKEN_TABLE_COLUMN, 'Secret Garden'), (5, gherkin.\n TOKEN_TABLE_COLUMN, '45'), (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_LABEL,\n 'And I have gardens in the database'), (6, gherkin.TOKEN_NEWLINE,\n '\\n'), (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.\n TOKEN_TABLE_COLUMN, 'area'), (7, gherkin.TOKEN_TABLE_COLUMN,\n 'raining'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.\n TOKEN_TABLE_COLUMN, \"Octopus' Garden\"), (8, gherkin.\n 
TOKEN_TABLE_COLUMN, '120'), (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_multi_line_str():\n \"\"\"Lexer.run() Should be able to find multi quoted strings after labels\"\"\"\n lexer = gherkin.Lexer(\n \"\"\" Given the following email template:\n '''Here we go with a pretty\n big block of text\n surrounded by triple quoted strings\n '''\n And a cat picture\n \"\"\\\"Now notice we didn't use (:) above\n \"\"\\\"\n \"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL,\n 'Given the following email template'), (1, gherkin.TOKEN_NEWLINE,\n '\\n'), (2, gherkin.TOKEN_QUOTES, \"'''\"), (2, gherkin.TOKEN_TEXT,\n \"\"\"Here we go with a pretty\n big block of text\n surrounded by triple quoted strings\n \"\"\"\n ), (5, gherkin.TOKEN_QUOTES, \"'''\"), (5, gherkin.TOKEN_NEWLINE,\n '\\n'), (6, gherkin.TOKEN_TEXT, 'And a cat picture'), (6, gherkin.\n TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_QUOTES, '\"\"\"'), (7, gherkin\n .TOKEN_TEXT, \"\"\"Now notice we didn't use (:) above\n \"\"\"), (8,\n gherkin.TOKEN_QUOTES, '\"\"\"'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9,\n gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_tags_empty():\n \"\"\"Lexer.lex_tag() Should bail if we reach EOF\"\"\"\n lexer = gherkin.Lexer('')\n lexer.lex_tag()\n lexer.tokens.should.be.empty\n\n\ndef test_lex_tags():\n \"\"\"Lexer.run() Should be able to find tags\"\"\"\n lexer = gherkin.Lexer(\n \"\"\" @tagged-feature\n Feature: Parse tags\n\n @tag1 @tag2\n Scenario: Test\n \"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_TAG, 'tagged-feature'), (1,\n gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_LABEL, 'Feature'),\n (2, gherkin.TOKEN_TEXT, 'Parse tags'), (2, gherkin.TOKEN_NEWLINE,\n '\\n'), (3, gherkin.TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_TAG,\n 'tag1'), (4, gherkin.TOKEN_TAG, 'tag2'), (4, gherkin.TOKEN_NEWLINE,\n '\\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5, gherkin.TOKEN_TEXT,\n 'Test'), (5, gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_EOF, '')])\n\n\ndef test_parse_metadata_empty():\n Parser([(1, gherkin.TOKEN_EOF, '')]).parse_metadata().should.be.none\n Parser([None]).parse_metadata().should.be.none\n\n\ndef test_parse_metadata_incomplete():\n parser = Parser([(1, gherkin.TOKEN_META_LABEL, 'language'), (1, gherkin\n .TOKEN_EOF, '')])\n parser.parse_metadata().should.be.none\n\n\ndef test_parse_metadata_syntax_error():\n parser = Parser([(1, gherkin.TOKEN_META_LABEL, 'language'), (1, gherkin\n .TOKEN_TEXT, 'pt-br')])\n parser.parse_metadata.when.called.should.throw(SyntaxError,\n \"No value found for the meta-field `language'\")\n\n\n<mask token>\n\n\ndef test_parse_empty_title():\n parser = Parser([(1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.\n TOKEN_TEXT, 'more text after title')])\n feature = parser.parse_title()\n feature.should.be.none\n\n\ndef test_parse_title():\n parser = Parser([(1, gherkin.TOKEN_TEXT, 'Scenario title'), (1, gherkin\n .TOKEN_NEWLINE, '\\n')])\n feature = parser.parse_title()\n feature.should.equal(Ast.Text(line=1, text='Scenario title'))\n\n\ndef test_parse_table():\n parser = Parser([(1, gherkin.TOKEN_TABLE_COLUMN, 'name'), (1, gherkin.\n TOKEN_TABLE_COLUMN, 'email'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'), (2, gherkin.\n TOKEN_TABLE_COLUMN, '[email protected]'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'), (\n 3, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'), (3,\n 
gherkin.TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_LABEL, 'Scenario'),\n (4, gherkin.TOKEN_EOF, '')])\n feature = parser.parse_table()\n feature.should.equal(Ast.Table(line=1, fields=[['name', 'email'], [\n 'Lincoln', '[email protected]'], ['Gabriel',\n '[email protected]']]))\n\n\ndef test_parse_background():\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Background'), (1, gherkin.\n TOKEN_TEXT, 'title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin\n .TOKEN_LABEL, 'Given two users in the database'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_TABLE_COLUMN, 'name'), (3,\n gherkin.TOKEN_TABLE_COLUMN, 'email'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'), (4, gherkin.\n TOKEN_TABLE_COLUMN, '[email protected]'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'), (\n 5, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'), (5,\n gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_LABEL, 'Scenario')])\n feature = parser.parse_background()\n feature.should.equal(Ast.Background(line=1, title=Ast.Text(line=1, text\n ='title'), steps=[Ast.Step(line=2, title=Ast.Text(line=2, text=\n 'Given two users in the database'), table=Ast.Table(line=3, fields=\n [['name', 'email'], ['Lincoln', '[email protected]'], ['Gabriel',\n '[email protected]']]))]))\n\n\ndef teste_parse_scenario():\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario'), (1, gherkin.\n TOKEN_TEXT, 'Scenario title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'Given first step')])\n feature = parser.parse_scenarios()\n feature.should.equal([Ast.Scenario(line=1, title=Ast.Text(line=1, text=\n 'Scenario title'), steps=[Ast.Step(line=2, title=Ast.Text(line=2,\n text='Given first step'))])])\n\n\ndef teste_parse_scenario_with_description():\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario'), (1, gherkin.\n TOKEN_TEXT, 'Scenario title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'Scenario description'), (2, gherkin.TOKEN_TEXT,\n 'More description'), (2, gherkin.TOKEN_NEWLINE, '\\n'), (3, gherkin.\n TOKEN_TEXT, 'Given first step')])\n feature = parser.parse_scenarios()\n feature.should.equal([Ast.Scenario(line=1, title=Ast.Text(line=1, text=\n 'Scenario title'), description=Ast.Text(line=2, text=\n 'Scenario description More description'), steps=[Ast.Step(line=3,\n title=Ast.Text(line=3, text='Given first step'))])])\n\n\ndef test_parse_scenario_outline_with_examples():\n \"\"\"\"\"\"\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario Outline'), (1,\n gherkin.TOKEN_TEXT, 'Plant a tree'), (1, gherkin.TOKEN_NEWLINE,\n '\\n'), (2, gherkin.TOKEN_TEXT, 'Given the <name> of a garden'), (2,\n gherkin.TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_TEXT,\n 'When I plant a tree'), (3, gherkin.TOKEN_NEWLINE, '\\n'), (4,\n gherkin.TOKEN_TEXT, 'And wait for <num_days> days'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_TEXT,\n 'Then I see it growing'), (5, gherkin.TOKEN_NEWLINE, '\\n'), (6,\n gherkin.TOKEN_LABEL, 'Examples'), (6, gherkin.TOKEN_NEWLINE, '\\n'),\n (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.\n TOKEN_TABLE_COLUMN, 'num_days'), (7, gherkin.TOKEN_NEWLINE, '\\n'),\n (8, gherkin.TOKEN_TABLE_COLUMN, 'Secret'), (8, gherkin.\n TOKEN_TABLE_COLUMN, '2'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9,\n gherkin.TOKEN_TABLE_COLUMN, 'Octopus'), (9, gherkin.\n TOKEN_TABLE_COLUMN, '5'), (9, gherkin.TOKEN_NEWLINE, '\\n'), (10,\n gherkin.TOKEN_EOF, '')])\n scenarios = parser.parse_scenarios()\n 
scenarios.should.equal([Ast.ScenarioOutline(line=1, title=Ast.Text(line\n =1, text='Plant a tree'), steps=[Ast.Step(line=2, title=Ast.Text(\n line=2, text='Given the <name> of a garden')), Ast.Step(line=3,\n title=Ast.Text(line=3, text='When I plant a tree')), Ast.Step(line=\n 4, title=Ast.Text(line=4, text='And wait for <num_days> days')),\n Ast.Step(line=5, title=Ast.Text(line=5, text=\n 'Then I see it growing'))], examples=Ast.Examples(line=6, table=Ast\n .Table(line=7, fields=[['name', 'num_days'], ['Secret', '2'], [\n 'Octopus', '5']])))])\n\n\n<mask token>\n\n\ndef test_parse_feature_two_backgrounds():\n parser = gherkin.Parser(gherkin.Lexer(\n \"\"\"\nFeature: Feature title\n feature description\n Background: Some background\n about the problem\n Background: Some other background\n will raise an exception\n Scenario: Scenario title\n Given first step\n When second step\n Then third step\n \"\"\"\n ).run())\n parser.parse_feature.when.called.should.throw(SyntaxError,\n \"`Background' should not be declared here, Scenario or Scenario Outline expected\"\n )\n\n\ndef test_parse_feature_background_wrong_place():\n parser = gherkin.Parser(gherkin.Lexer(\n \"\"\"\nFeature: Feature title\n feature description\n Scenario: Scenario title\n Given first step\n When second step\n Then third step\n Background: Some background\n about the problem\n \"\"\"\n ).run())\n parser.parse_feature.when.called.should.throw(SyntaxError,\n \"`Background' should not be declared here, Scenario or Scenario Outline expected\"\n )\n\n\ndef test_parse_feature():\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'Feature title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'feature description'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL, 'Background'), (3,\n gherkin.TOKEN_TEXT, 'Some background'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TEXT, 'Given the problem'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5,\n gherkin.TOKEN_TEXT, 'Scenario title'), (5, gherkin.TOKEN_NEWLINE,\n '\\n'), (6, gherkin.TOKEN_TEXT, 'Given first step'), (6, gherkin.\n TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_LABEL, 'Scenario'), (7,\n gherkin.TOKEN_TEXT, 'Another scenario'), (7, gherkin.TOKEN_NEWLINE,\n '\\n'), (8, gherkin.TOKEN_TEXT, 'Given this step'), (8, gherkin.\n TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_TEXT,\n 'When we take another step'), (9, gherkin.TOKEN_NEWLINE, '\\n'), (10,\n gherkin.TOKEN_EOF, '')])\n feature = parser.parse_feature()\n feature.should.equal(Ast.Feature(line=1, title=Ast.Text(line=1, text=\n 'Feature title'), description=Ast.Text(line=2, text=\n 'feature description'), background=Ast.Background(line=3, title=Ast\n .Text(line=3, text='Some background'), steps=[Ast.Step(line=4,\n title=Ast.Text(line=4, text='Given the problem'))]), scenarios=[Ast\n .Scenario(line=5, title=Ast.Text(line=5, text='Scenario title'),\n steps=[Ast.Step(line=6, title=Ast.Text(line=6, text=\n 'Given first step'))]), Ast.Scenario(line=7, title=Ast.Text(line=7,\n text='Another scenario'), steps=[Ast.Step(line=8, title=Ast.Text(\n line=8, text='Given this step')), Ast.Step(line=9, title=Ast.Text(\n line=9, text='When we take another step'))])]))\n\n\ndef test_parse_tables_within_steps():\n \"\"\"Lexer.run() Should be able to parse example tables from steps\"\"\"\n \"\"\"Feature: Check models existence\n\t\tBackground:\n\t Given I have a garden in the database:\n\t | @name | area | raining |\n\t | Secret Garden | 45 | false 
|\n\t And I have gardens in the database:\n\t | name | area | raining |\n\t | Octopus' Garden | 120 | true |\n Scenario: Plant a tree\n Given the <name> of a garden\n When I plant a tree\n And wait for <num_days> days\n Then I see it growing\n \"\"\"\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'Check models existence'), (1, gherkin.TOKEN_NEWLINE,\n '\\n'), (2, gherkin.TOKEN_LABEL, 'Background'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL,\n 'Given I have a garden in the database'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TABLE_COLUMN, '@name'), (4, gherkin.\n TOKEN_TABLE_COLUMN, 'area'), (4, gherkin.TOKEN_TABLE_COLUMN,\n 'raining'), (4, gherkin.TOKEN_NEWLINE, '\\n'), (5, gherkin.\n TOKEN_TABLE_COLUMN, 'Secret Garden'), (5, gherkin.\n TOKEN_TABLE_COLUMN, '45'), (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_LABEL,\n 'And I have gardens in the database'), (6, gherkin.TOKEN_NEWLINE,\n '\\n'), (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.\n TOKEN_TABLE_COLUMN, 'area'), (7, gherkin.TOKEN_TABLE_COLUMN,\n 'raining'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.\n TOKEN_TABLE_COLUMN, \"Octopus' Garden\"), (8, gherkin.\n TOKEN_TABLE_COLUMN, '120'), (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_LABEL,\n 'Scenario'), (9, gherkin.TOKEN_TEXT, 'Plant a tree'), (9, gherkin.\n TOKEN_NEWLINE, '\\n'), (10, gherkin.TOKEN_TEXT,\n 'Given the <name> of a garden'), (10, gherkin.TOKEN_NEWLINE, '\\n'),\n (11, gherkin.TOKEN_TEXT, 'When I plant a tree'), (11, gherkin.\n TOKEN_NEWLINE, '\\n'), (12, gherkin.TOKEN_TEXT,\n 'And wait for <num_days> days'), (12, gherkin.TOKEN_NEWLINE, '\\n'),\n (13, gherkin.TOKEN_TEXT, 'Then I see it growing'), (13, gherkin.\n TOKEN_NEWLINE, '\\n'), (14, gherkin.TOKEN_EOF, '')])\n feature = parser.parse_feature()\n feature.should.equal(Ast.Feature(line=1, title=Ast.Text(line=1, text=\n 'Check models existence'), background=Ast.Background(line=2, steps=\n [Ast.Step(line=3, title=Ast.Text(line=3, text=\n 'Given I have a garden in the database'), table=Ast.Table(line=4,\n fields=[['@name', 'area', 'raining'], ['Secret Garden', '45',\n 'false']])), Ast.Step(line=6, title=Ast.Text(line=6, text=\n 'And I have gardens in the database'), table=Ast.Table(line=7,\n fields=[['name', 'area', 'raining'], [\"Octopus' Garden\", '120',\n 'true']]))]), scenarios=[Ast.Scenario(title=Ast.Text(line=9, text=\n 'Plant a tree'), line=9, steps=[Ast.Step(line=10, title=Ast.Text(\n line=10, text='Given the <name> of a garden')), Ast.Step(line=11,\n title=Ast.Text(line=11, text='When I plant a tree')), Ast.Step(line\n =12, title=Ast.Text(line=12, text='And wait for <num_days> days')),\n Ast.Step(line=13, title=Ast.Text(line=13, text=\n 'Then I see it growing'))])]))\n\n\ndef test_parse_quoted_strings_on_steps():\n parser = Parser([(1, gherkin.TOKEN_LABEL,\n 'Given the following email template'), (1, gherkin.TOKEN_NEWLINE,\n '\\n'), (2, gherkin.TOKEN_QUOTES, \"'''\"), (2, gherkin.TOKEN_TEXT,\n \"\"\"Here we go with a pretty\n big block of text\n surrounded by triple quoted strings\n \"\"\"\n ), (5, gherkin.TOKEN_QUOTES, \"'''\"), (5, gherkin.TOKEN_NEWLINE,\n '\\n'), (6, gherkin.TOKEN_TEXT, 'And a cat picture'), (6, gherkin.\n TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_QUOTES, '\"\"\"'), (7, gherkin\n .TOKEN_TEXT, \"\"\"Now notice we didn't use (:) above\n \"\"\"), (8,\n gherkin.TOKEN_QUOTES, '\"\"\"'), (8, gherkin.TOKEN_NEWLINE, '\\n'), 
(9,\n gherkin.TOKEN_EOF, '')])\n steps = parser.parse_steps()\n steps.should.equal([Ast.Step(line=1, title=Ast.Text(line=1, text=\n 'Given the following email template'), text=Ast.Text(line=2, text=\n \"\"\"Here we go with a pretty\n big block of text\n surrounded by triple quoted strings\n \"\"\"\n )), Ast.Step(line=6, title=Ast.Text(line=6, text=\n 'And a cat picture'), text=Ast.Text(line=7, text=\n \"\"\"Now notice we didn't use (:) above\n \"\"\"))])\n\n\ndef test_parse_text():\n parser = Parser([(1, gherkin.TOKEN_TAG, 'tag1'), (1, gherkin.TOKEN_TAG,\n 'tag2'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_TAG,\n 'tag3'), (2, gherkin.TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL,\n 'Feature')])\n tags = parser.parse_tags()\n tags.should.equal(['tag1', 'tag2', 'tag3'])\n\n\ndef test_parse_tags_on_scenario_outline_examples():\n \"\"\"Parser should allow tags to be defined in examples\"\"\"\n parser = Parser([(1, gherkin.TOKEN_TAG, 'tagged-feature'), (1, gherkin.\n TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_LABEL, 'Feature'), (2,\n gherkin.TOKEN_TEXT, 'Parse tags'), (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_TAG, 'tag1'), (3, gherkin.TOKEN_TAG, 'tag2'), (3,\n gherkin.TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_LABEL,\n 'Scenario Outline'), (4, gherkin.TOKEN_TEXT, 'Test'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_TAG, 'example-tag1'), (5,\n gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_TAG, 'example-tag2'\n ), (6, gherkin.TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_LABEL,\n 'Examples'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.\n TOKEN_TABLE_COLUMN, 'Header'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9,\n gherkin.TOKEN_EOF, '')])\n feature = parser.parse_feature()\n feature.should.equal(Ast.Feature(line=2, title=Ast.Text(line=2, text=\n 'Parse tags'), tags=['tagged-feature'], scenarios=[Ast.\n ScenarioOutline(line=4, title=Ast.Text(line=4, text='Test'), tags=[\n 'tag1', 'tag2'], examples=Ast.Examples(line=7, tags=['example-tag1',\n 'example-tag2'], table=Ast.Table(line=8, fields=[['Header']])))]))\n\n\n<mask token>\n\n\ndef test_ast_node_equal():\n n1 = Ast.Node()\n n2 = Ast.Node()\n n1.name = 'Lincoln'\n n2.color = 'green'\n equal = n1 == n2\n equal.should.be.false\n",
"step-4": "<mask token>\n\n\ndef test_lex_test_eof():\n \"\"\"lex_text() Should be able to find EOF\"\"\"\n lexer = gherkin.Lexer('')\n new_state = lexer.lex_text()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_EOF, '')])\n new_state.should.be.none\n\n\ndef test_lex_text():\n \"\"\"lex_text() Should be able to find text before EOF\"\"\"\n lexer = gherkin.Lexer('some text')\n new_state = lexer.lex_text()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_TEXT, 'some text'), (1,\n gherkin.TOKEN_EOF, '')])\n new_state.should.be.none\n\n\ndef test_lex_hash_with_text():\n \"\"\"lex_text() Should stop lexing at # (we found a comment!)\"\"\"\n lexer = gherkin.Lexer(' some text # random comment')\n new_state = lexer.lex_text()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_TEXT, 'some text ')])\n new_state.should.equal(lexer.lex_comment)\n\n\ndef test_lex_comment():\n \"\"\"lex_comment() Should stop lexing at \\\\n\"\"\"\n lexer = gherkin.Lexer(' random comment')\n new_state = lexer.lex_comment()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_COMMENT, 'random comment')])\n new_state.should.equal(lexer.lex_text)\n\n\ndef test_lex_comment_meta_label():\n \"\"\"lex_comment() Should stop lexing at : (we found a label)\"\"\"\n lexer = gherkin.Lexer(' metadata: test')\n new_state = lexer.lex_comment()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_META_LABEL, 'metadata')])\n new_state.should.equal(lexer.lex_comment_metadata_value)\n\n\ndef test_lex_comment_metadata_value():\n \"\"\"lex_comment_metadata_value() Should stop lexing at \n\"\"\"\n lexer = gherkin.Lexer(' test value\\nblah')\n new_state = lexer.lex_comment_metadata_value()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_META_VALUE, 'test value')])\n new_state.should.equal(lexer.lex_text)\n\n\ndef test_lex_comment_no_newline():\n lexer = gherkin.Lexer(' test comment')\n new_state = lexer.lex_comment_metadata_value()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_META_VALUE, 'test comment')])\n new_state.should.equal(lexer.lex_text)\n\n\ndef test_lex_comment_until_newline():\n \"\"\"Lexer.lex_comment() Should parse comments until the newline character\"\"\"\n lexer = gherkin.Lexer('# one line\\n# another line')\n tokens = lexer.run()\n lexer.tokens.should.equal([(1, gherkin.TOKEN_COMMENT, 'one line'), (1,\n gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_COMMENT,\n 'another line'), (2, gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_comment_full():\n \"\"\"Lexer.run() Should be able to process metadata in comments\"\"\"\n lexer = gherkin.Lexer('some text # metadata-field: blah-value\\ntext')\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_TEXT, 'some text '), (1, gherkin\n .TOKEN_META_LABEL, 'metadata-field'), (1, gherkin.TOKEN_META_VALUE,\n 'blah-value'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.\n TOKEN_TEXT, 'text'), (2, gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_text_with_label():\n \"\"\"Lexer.run() Should be able to parse a label with some text\"\"\"\n lexer = gherkin.Lexer(\n 'Feature: A cool feature\\n some more text\\n even more text')\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'A cool feature'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'some more text'), (2, gherkin.TOKEN_NEWLINE,\n '\\n'), (3, gherkin.TOKEN_TEXT, 'even more text'), (3, gherkin.\n TOKEN_EOF, '')])\n\n\ndef test_lex_text_with_labels():\n \"\"\"Lexer.run() Should be able to tokenize a feature with a scenario\"\"\"\n lexer = gherkin.Lexer(\n \"\"\"\n\nFeature: Some 
descriptive text\n In order to parse a Gherkin file\n As a parser\n I want to be able to parse scenarios\n\n Even more text\n\n Scenario: The user wants to describe a feature\n\"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL, 'Feature'), (3,\n gherkin.TOKEN_TEXT, 'Some descriptive text'), (3, gherkin.\n TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_TEXT,\n 'In order to parse a Gherkin file'), (4, gherkin.TOKEN_NEWLINE,\n '\\n'), (5, gherkin.TOKEN_TEXT, 'As a parser'), (5, gherkin.\n TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_TEXT,\n 'I want to be able to parse scenarios'), (6, gherkin.TOKEN_NEWLINE,\n '\\n'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.TOKEN_TEXT,\n 'Even more text'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.\n TOKEN_NEWLINE, '\\n'), (10, gherkin.TOKEN_LABEL, 'Scenario'), (10,\n gherkin.TOKEN_TEXT, 'The user wants to describe a feature'), (10,\n gherkin.TOKEN_NEWLINE, '\\n'), (11, gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_text_with_steps():\n \"\"\"Lexer.run() Should be able to tokenize steps\"\"\"\n lexer = gherkin.Lexer(\n \"\"\"Feature: Feature title\n feature description\n Background: Some background\n about the problem\n Scenario: Scenario title\n Given first step\n When second step\n Then third step\n\"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'Feature title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'feature description'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL, 'Background'), (3,\n gherkin.TOKEN_TEXT, 'Some background'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TEXT, 'about the problem'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5,\n gherkin.TOKEN_TEXT, 'Scenario title'), (5, gherkin.TOKEN_NEWLINE,\n '\\n'), (6, gherkin.TOKEN_TEXT, 'Given first step'), (6, gherkin.\n TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_TEXT, 'When second step'),\n (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.TOKEN_TEXT,\n 'Then third step'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.\n TOKEN_EOF, '')])\n\n\ndef test_lex_load_languages():\n \"\"\"Lexer.run() Should be able to parse different languages\"\"\"\n lexer = gherkin.Lexer(\n \"\"\"# language: pt-br\n\n Funcionalidade: Interpretador para gherkin\n Para escrever testes de aceitação\n Como um programador\n Preciso de uma ferramenta de BDD\n Contexto:\n Dado que a variavel \"X\" contém o número 2\n Cenário: Lanche\n Dada uma maçã\n Quando mordida\n Então a fome passa\n \"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_META_LABEL, 'language'), (1,\n gherkin.TOKEN_META_VALUE, 'pt-br'), (1, gherkin.TOKEN_NEWLINE, '\\n'\n ), (2, gherkin.TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL,\n 'Funcionalidade'), (3, gherkin.TOKEN_TEXT,\n 'Interpretador para gherkin'), (3, gherkin.TOKEN_NEWLINE, '\\n'), (4,\n gherkin.TOKEN_TEXT, 'Para escrever testes de aceitação'), (4,\n gherkin.TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_TEXT,\n 'Como um programador'), (5, gherkin.TOKEN_NEWLINE, '\\n'), (6,\n gherkin.TOKEN_TEXT, 'Preciso de uma ferramenta de BDD'), (6,\n gherkin.TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_LABEL, 'Contexto'),\n (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.TOKEN_TEXT,\n 'Dado que a variavel \"X\" contém o número 2'), (8, gherkin.\n TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_LABEL, 'Cenário'), (9,\n gherkin.TOKEN_TEXT, 'Lanche'), (9, 
gherkin.TOKEN_NEWLINE, '\\n'), (\n 10, gherkin.TOKEN_TEXT, 'Dada uma maçã'), (10, gherkin.\n TOKEN_NEWLINE, '\\n'), (11, gherkin.TOKEN_TEXT, 'Quando mordida'), (\n 11, gherkin.TOKEN_NEWLINE, '\\n'), (12, gherkin.TOKEN_TEXT,\n 'Então a fome passa'), (12, gherkin.TOKEN_NEWLINE, '\\n'), (13,\n gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_tables():\n \"\"\"Lexer.run() Should be able to lex tables\"\"\"\n lexer = gherkin.Lexer(\"\"\" Examples:\n | column1 | column2 | \"\"\")\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Examples'), (1, gherkin.\n TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_TABLE_COLUMN, 'column1'), (\n 2, gherkin.TOKEN_TABLE_COLUMN, 'column2'), (2, gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_tables_full():\n \"\"\"Lexer.run() Should be able to lex scenario outlines\"\"\"\n lexer = gherkin.Lexer(\n \"\"\" Feature: gherkin has steps with examples\n Scenario Outline: Add two numbers\n Given I have <input_1> and <input_2> the calculator\n When I press \"Sum\"!\n Then the result should be <output> on the screen\n Examples:\n | input_1 | input_2 | output |\n | 20 | 30 | 50 |\n | 0 | 40 | 40 |\n\"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'gherkin has steps with examples'), (1, gherkin.\n TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_LABEL, 'Scenario Outline'),\n (2, gherkin.TOKEN_TEXT, 'Add two numbers'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_TEXT,\n 'Given I have <input_1> and <input_2> the calculator'), (3, gherkin\n .TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_TEXT,\n 'When I press \"Sum\"!'), (4, gherkin.TOKEN_NEWLINE, '\\n'), (5,\n gherkin.TOKEN_TEXT,\n 'Then the result should be <output> on the screen'), (5, gherkin.\n TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_LABEL, 'Examples'), (6,\n gherkin.TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_TABLE_COLUMN,\n 'input_1'), (7, gherkin.TOKEN_TABLE_COLUMN, 'input_2'), (7, gherkin\n .TOKEN_TABLE_COLUMN, 'output'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (\n 8, gherkin.TOKEN_TABLE_COLUMN, '20'), (8, gherkin.\n TOKEN_TABLE_COLUMN, '30'), (8, gherkin.TOKEN_TABLE_COLUMN, '50'), (\n 8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_TABLE_COLUMN,\n '0'), (9, gherkin.TOKEN_TABLE_COLUMN, '40'), (9, gherkin.\n TOKEN_TABLE_COLUMN, '40'), (9, gherkin.TOKEN_NEWLINE, '\\n'), (10,\n gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_tables_within_steps():\n \"\"\"Lexer.run() Should be able to lex example tables from steps\"\"\"\n lexer = gherkin.Lexer(\n \"\"\"\tFeature: Check models existence\n\t\tBackground:\n\t Given I have a garden in the database:\n\t | @name | area | raining |\n\t | Secret Garden | 45 | false |\n\t And I have gardens in the database:\n\t | name | area | raining |\n\t | Octopus' Garden | 120 | true |\n \"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'Check models existence'), (1, gherkin.TOKEN_NEWLINE,\n '\\n'), (2, gherkin.TOKEN_LABEL, 'Background'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL,\n 'Given I have a garden in the database'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TABLE_COLUMN, '@name'), (4, gherkin.\n TOKEN_TABLE_COLUMN, 'area'), (4, gherkin.TOKEN_TABLE_COLUMN,\n 'raining'), (4, gherkin.TOKEN_NEWLINE, '\\n'), (5, gherkin.\n TOKEN_TABLE_COLUMN, 'Secret Garden'), (5, gherkin.\n TOKEN_TABLE_COLUMN, '45'), (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_LABEL,\n 'And I have 
gardens in the database'), (6, gherkin.TOKEN_NEWLINE,\n '\\n'), (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.\n TOKEN_TABLE_COLUMN, 'area'), (7, gherkin.TOKEN_TABLE_COLUMN,\n 'raining'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.\n TOKEN_TABLE_COLUMN, \"Octopus' Garden\"), (8, gherkin.\n TOKEN_TABLE_COLUMN, '120'), (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_multi_line_str():\n \"\"\"Lexer.run() Should be able to find multi quoted strings after labels\"\"\"\n lexer = gherkin.Lexer(\n \"\"\" Given the following email template:\n '''Here we go with a pretty\n big block of text\n surrounded by triple quoted strings\n '''\n And a cat picture\n \"\"\\\"Now notice we didn't use (:) above\n \"\"\\\"\n \"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_LABEL,\n 'Given the following email template'), (1, gherkin.TOKEN_NEWLINE,\n '\\n'), (2, gherkin.TOKEN_QUOTES, \"'''\"), (2, gherkin.TOKEN_TEXT,\n \"\"\"Here we go with a pretty\n big block of text\n surrounded by triple quoted strings\n \"\"\"\n ), (5, gherkin.TOKEN_QUOTES, \"'''\"), (5, gherkin.TOKEN_NEWLINE,\n '\\n'), (6, gherkin.TOKEN_TEXT, 'And a cat picture'), (6, gherkin.\n TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_QUOTES, '\"\"\"'), (7, gherkin\n .TOKEN_TEXT, \"\"\"Now notice we didn't use (:) above\n \"\"\"), (8,\n gherkin.TOKEN_QUOTES, '\"\"\"'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9,\n gherkin.TOKEN_EOF, '')])\n\n\ndef test_lex_tags_empty():\n \"\"\"Lexer.lex_tag() Should bail if we reach EOF\"\"\"\n lexer = gherkin.Lexer('')\n lexer.lex_tag()\n lexer.tokens.should.be.empty\n\n\ndef test_lex_tags():\n \"\"\"Lexer.run() Should be able to find tags\"\"\"\n lexer = gherkin.Lexer(\n \"\"\" @tagged-feature\n Feature: Parse tags\n\n @tag1 @tag2\n Scenario: Test\n \"\"\"\n )\n tokens = lexer.run()\n tokens.should.equal([(1, gherkin.TOKEN_TAG, 'tagged-feature'), (1,\n gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_LABEL, 'Feature'),\n (2, gherkin.TOKEN_TEXT, 'Parse tags'), (2, gherkin.TOKEN_NEWLINE,\n '\\n'), (3, gherkin.TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_TAG,\n 'tag1'), (4, gherkin.TOKEN_TAG, 'tag2'), (4, gherkin.TOKEN_NEWLINE,\n '\\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5, gherkin.TOKEN_TEXT,\n 'Test'), (5, gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_EOF, '')])\n\n\ndef test_parse_metadata_empty():\n Parser([(1, gherkin.TOKEN_EOF, '')]).parse_metadata().should.be.none\n Parser([None]).parse_metadata().should.be.none\n\n\ndef test_parse_metadata_incomplete():\n parser = Parser([(1, gherkin.TOKEN_META_LABEL, 'language'), (1, gherkin\n .TOKEN_EOF, '')])\n parser.parse_metadata().should.be.none\n\n\ndef test_parse_metadata_syntax_error():\n parser = Parser([(1, gherkin.TOKEN_META_LABEL, 'language'), (1, gherkin\n .TOKEN_TEXT, 'pt-br')])\n parser.parse_metadata.when.called.should.throw(SyntaxError,\n \"No value found for the meta-field `language'\")\n\n\ndef test_parse_metadata():\n parser = Parser([(1, gherkin.TOKEN_META_LABEL, 'language'), (1, gherkin\n .TOKEN_META_VALUE, 'pt-br')])\n metadata = parser.parse_metadata()\n metadata.should.equal(Ast.Metadata(line=1, key='language', value='pt-br'))\n\n\ndef test_parse_empty_title():\n parser = Parser([(1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.\n TOKEN_TEXT, 'more text after title')])\n feature = parser.parse_title()\n feature.should.be.none\n\n\ndef test_parse_title():\n parser = Parser([(1, gherkin.TOKEN_TEXT, 'Scenario title'), (1, gherkin\n .TOKEN_NEWLINE, 
'\\n')])\n feature = parser.parse_title()\n feature.should.equal(Ast.Text(line=1, text='Scenario title'))\n\n\ndef test_parse_table():\n parser = Parser([(1, gherkin.TOKEN_TABLE_COLUMN, 'name'), (1, gherkin.\n TOKEN_TABLE_COLUMN, 'email'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'), (2, gherkin.\n TOKEN_TABLE_COLUMN, '[email protected]'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'), (\n 3, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'), (3,\n gherkin.TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_LABEL, 'Scenario'),\n (4, gherkin.TOKEN_EOF, '')])\n feature = parser.parse_table()\n feature.should.equal(Ast.Table(line=1, fields=[['name', 'email'], [\n 'Lincoln', '[email protected]'], ['Gabriel',\n '[email protected]']]))\n\n\ndef test_parse_background():\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Background'), (1, gherkin.\n TOKEN_TEXT, 'title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin\n .TOKEN_LABEL, 'Given two users in the database'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_TABLE_COLUMN, 'name'), (3,\n gherkin.TOKEN_TABLE_COLUMN, 'email'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'), (4, gherkin.\n TOKEN_TABLE_COLUMN, '[email protected]'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'), (\n 5, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'), (5,\n gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_LABEL, 'Scenario')])\n feature = parser.parse_background()\n feature.should.equal(Ast.Background(line=1, title=Ast.Text(line=1, text\n ='title'), steps=[Ast.Step(line=2, title=Ast.Text(line=2, text=\n 'Given two users in the database'), table=Ast.Table(line=3, fields=\n [['name', 'email'], ['Lincoln', '[email protected]'], ['Gabriel',\n '[email protected]']]))]))\n\n\ndef teste_parse_scenario():\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario'), (1, gherkin.\n TOKEN_TEXT, 'Scenario title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'Given first step')])\n feature = parser.parse_scenarios()\n feature.should.equal([Ast.Scenario(line=1, title=Ast.Text(line=1, text=\n 'Scenario title'), steps=[Ast.Step(line=2, title=Ast.Text(line=2,\n text='Given first step'))])])\n\n\ndef teste_parse_scenario_with_description():\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario'), (1, gherkin.\n TOKEN_TEXT, 'Scenario title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'Scenario description'), (2, gherkin.TOKEN_TEXT,\n 'More description'), (2, gherkin.TOKEN_NEWLINE, '\\n'), (3, gherkin.\n TOKEN_TEXT, 'Given first step')])\n feature = parser.parse_scenarios()\n feature.should.equal([Ast.Scenario(line=1, title=Ast.Text(line=1, text=\n 'Scenario title'), description=Ast.Text(line=2, text=\n 'Scenario description More description'), steps=[Ast.Step(line=3,\n title=Ast.Text(line=3, text='Given first step'))])])\n\n\ndef test_parse_scenario_outline_with_examples():\n \"\"\"\"\"\"\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Scenario Outline'), (1,\n gherkin.TOKEN_TEXT, 'Plant a tree'), (1, gherkin.TOKEN_NEWLINE,\n '\\n'), (2, gherkin.TOKEN_TEXT, 'Given the <name> of a garden'), (2,\n gherkin.TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_TEXT,\n 'When I plant a tree'), (3, gherkin.TOKEN_NEWLINE, '\\n'), (4,\n gherkin.TOKEN_TEXT, 'And wait for <num_days> days'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_TEXT,\n 'Then I see it growing'), (5, gherkin.TOKEN_NEWLINE, '\\n'), (6,\n gherkin.TOKEN_LABEL, 
'Examples'), (6, gherkin.TOKEN_NEWLINE, '\\n'),\n (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.\n TOKEN_TABLE_COLUMN, 'num_days'), (7, gherkin.TOKEN_NEWLINE, '\\n'),\n (8, gherkin.TOKEN_TABLE_COLUMN, 'Secret'), (8, gherkin.\n TOKEN_TABLE_COLUMN, '2'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9,\n gherkin.TOKEN_TABLE_COLUMN, 'Octopus'), (9, gherkin.\n TOKEN_TABLE_COLUMN, '5'), (9, gherkin.TOKEN_NEWLINE, '\\n'), (10,\n gherkin.TOKEN_EOF, '')])\n scenarios = parser.parse_scenarios()\n scenarios.should.equal([Ast.ScenarioOutline(line=1, title=Ast.Text(line\n =1, text='Plant a tree'), steps=[Ast.Step(line=2, title=Ast.Text(\n line=2, text='Given the <name> of a garden')), Ast.Step(line=3,\n title=Ast.Text(line=3, text='When I plant a tree')), Ast.Step(line=\n 4, title=Ast.Text(line=4, text='And wait for <num_days> days')),\n Ast.Step(line=5, title=Ast.Text(line=5, text=\n 'Then I see it growing'))], examples=Ast.Examples(line=6, table=Ast\n .Table(line=7, fields=[['name', 'num_days'], ['Secret', '2'], [\n 'Octopus', '5']])))])\n\n\ndef test_parse_not_starting_with_feature():\n parser = gherkin.Parser(gherkin.Lexer(\n \"\"\"\nScenario: Scenario title\n Given first step\n When second step\n Then third step\n \"\"\"\n ).run())\n parser.parse_feature.when.called.should.throw(SyntaxError,\n \"Feature expected in the beginning of the file, found `Scenario' though.\"\n )\n\n\ndef test_parse_feature_two_backgrounds():\n parser = gherkin.Parser(gherkin.Lexer(\n \"\"\"\nFeature: Feature title\n feature description\n Background: Some background\n about the problem\n Background: Some other background\n will raise an exception\n Scenario: Scenario title\n Given first step\n When second step\n Then third step\n \"\"\"\n ).run())\n parser.parse_feature.when.called.should.throw(SyntaxError,\n \"`Background' should not be declared here, Scenario or Scenario Outline expected\"\n )\n\n\ndef test_parse_feature_background_wrong_place():\n parser = gherkin.Parser(gherkin.Lexer(\n \"\"\"\nFeature: Feature title\n feature description\n Scenario: Scenario title\n Given first step\n When second step\n Then third step\n Background: Some background\n about the problem\n \"\"\"\n ).run())\n parser.parse_feature.when.called.should.throw(SyntaxError,\n \"`Background' should not be declared here, Scenario or Scenario Outline expected\"\n )\n\n\ndef test_parse_feature():\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'Feature title'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2,\n gherkin.TOKEN_TEXT, 'feature description'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL, 'Background'), (3,\n gherkin.TOKEN_TEXT, 'Some background'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TEXT, 'Given the problem'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_LABEL, 'Scenario'), (5,\n gherkin.TOKEN_TEXT, 'Scenario title'), (5, gherkin.TOKEN_NEWLINE,\n '\\n'), (6, gherkin.TOKEN_TEXT, 'Given first step'), (6, gherkin.\n TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_LABEL, 'Scenario'), (7,\n gherkin.TOKEN_TEXT, 'Another scenario'), (7, gherkin.TOKEN_NEWLINE,\n '\\n'), (8, gherkin.TOKEN_TEXT, 'Given this step'), (8, gherkin.\n TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_TEXT,\n 'When we take another step'), (9, gherkin.TOKEN_NEWLINE, '\\n'), (10,\n gherkin.TOKEN_EOF, '')])\n feature = parser.parse_feature()\n feature.should.equal(Ast.Feature(line=1, title=Ast.Text(line=1, text=\n 'Feature title'), description=Ast.Text(line=2, text=\n 'feature description'), 
background=Ast.Background(line=3, title=Ast\n .Text(line=3, text='Some background'), steps=[Ast.Step(line=4,\n title=Ast.Text(line=4, text='Given the problem'))]), scenarios=[Ast\n .Scenario(line=5, title=Ast.Text(line=5, text='Scenario title'),\n steps=[Ast.Step(line=6, title=Ast.Text(line=6, text=\n 'Given first step'))]), Ast.Scenario(line=7, title=Ast.Text(line=7,\n text='Another scenario'), steps=[Ast.Step(line=8, title=Ast.Text(\n line=8, text='Given this step')), Ast.Step(line=9, title=Ast.Text(\n line=9, text='When we take another step'))])]))\n\n\ndef test_parse_tables_within_steps():\n \"\"\"Lexer.run() Should be able to parse example tables from steps\"\"\"\n \"\"\"Feature: Check models existence\n\t\tBackground:\n\t Given I have a garden in the database:\n\t | @name | area | raining |\n\t | Secret Garden | 45 | false |\n\t And I have gardens in the database:\n\t | name | area | raining |\n\t | Octopus' Garden | 120 | true |\n Scenario: Plant a tree\n Given the <name> of a garden\n When I plant a tree\n And wait for <num_days> days\n Then I see it growing\n \"\"\"\n parser = Parser([(1, gherkin.TOKEN_LABEL, 'Feature'), (1, gherkin.\n TOKEN_TEXT, 'Check models existence'), (1, gherkin.TOKEN_NEWLINE,\n '\\n'), (2, gherkin.TOKEN_LABEL, 'Background'), (2, gherkin.\n TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL,\n 'Given I have a garden in the database'), (3, gherkin.TOKEN_NEWLINE,\n '\\n'), (4, gherkin.TOKEN_TABLE_COLUMN, '@name'), (4, gherkin.\n TOKEN_TABLE_COLUMN, 'area'), (4, gherkin.TOKEN_TABLE_COLUMN,\n 'raining'), (4, gherkin.TOKEN_NEWLINE, '\\n'), (5, gherkin.\n TOKEN_TABLE_COLUMN, 'Secret Garden'), (5, gherkin.\n TOKEN_TABLE_COLUMN, '45'), (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_LABEL,\n 'And I have gardens in the database'), (6, gherkin.TOKEN_NEWLINE,\n '\\n'), (7, gherkin.TOKEN_TABLE_COLUMN, 'name'), (7, gherkin.\n TOKEN_TABLE_COLUMN, 'area'), (7, gherkin.TOKEN_TABLE_COLUMN,\n 'raining'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.\n TOKEN_TABLE_COLUMN, \"Octopus' Garden\"), (8, gherkin.\n TOKEN_TABLE_COLUMN, '120'), (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'), (9, gherkin.TOKEN_LABEL,\n 'Scenario'), (9, gherkin.TOKEN_TEXT, 'Plant a tree'), (9, gherkin.\n TOKEN_NEWLINE, '\\n'), (10, gherkin.TOKEN_TEXT,\n 'Given the <name> of a garden'), (10, gherkin.TOKEN_NEWLINE, '\\n'),\n (11, gherkin.TOKEN_TEXT, 'When I plant a tree'), (11, gherkin.\n TOKEN_NEWLINE, '\\n'), (12, gherkin.TOKEN_TEXT,\n 'And wait for <num_days> days'), (12, gherkin.TOKEN_NEWLINE, '\\n'),\n (13, gherkin.TOKEN_TEXT, 'Then I see it growing'), (13, gherkin.\n TOKEN_NEWLINE, '\\n'), (14, gherkin.TOKEN_EOF, '')])\n feature = parser.parse_feature()\n feature.should.equal(Ast.Feature(line=1, title=Ast.Text(line=1, text=\n 'Check models existence'), background=Ast.Background(line=2, steps=\n [Ast.Step(line=3, title=Ast.Text(line=3, text=\n 'Given I have a garden in the database'), table=Ast.Table(line=4,\n fields=[['@name', 'area', 'raining'], ['Secret Garden', '45',\n 'false']])), Ast.Step(line=6, title=Ast.Text(line=6, text=\n 'And I have gardens in the database'), table=Ast.Table(line=7,\n fields=[['name', 'area', 'raining'], [\"Octopus' Garden\", '120',\n 'true']]))]), scenarios=[Ast.Scenario(title=Ast.Text(line=9, text=\n 'Plant a tree'), line=9, steps=[Ast.Step(line=10, title=Ast.Text(\n line=10, text='Given the <name> of a garden')), Ast.Step(line=11,\n title=Ast.Text(line=11, text='When I plant a tree')), 
Ast.Step(line\n =12, title=Ast.Text(line=12, text='And wait for <num_days> days')),\n Ast.Step(line=13, title=Ast.Text(line=13, text=\n 'Then I see it growing'))])]))\n\n\ndef test_parse_quoted_strings_on_steps():\n parser = Parser([(1, gherkin.TOKEN_LABEL,\n 'Given the following email template'), (1, gherkin.TOKEN_NEWLINE,\n '\\n'), (2, gherkin.TOKEN_QUOTES, \"'''\"), (2, gherkin.TOKEN_TEXT,\n \"\"\"Here we go with a pretty\n big block of text\n surrounded by triple quoted strings\n \"\"\"\n ), (5, gherkin.TOKEN_QUOTES, \"'''\"), (5, gherkin.TOKEN_NEWLINE,\n '\\n'), (6, gherkin.TOKEN_TEXT, 'And a cat picture'), (6, gherkin.\n TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_QUOTES, '\"\"\"'), (7, gherkin\n .TOKEN_TEXT, \"\"\"Now notice we didn't use (:) above\n \"\"\"), (8,\n gherkin.TOKEN_QUOTES, '\"\"\"'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9,\n gherkin.TOKEN_EOF, '')])\n steps = parser.parse_steps()\n steps.should.equal([Ast.Step(line=1, title=Ast.Text(line=1, text=\n 'Given the following email template'), text=Ast.Text(line=2, text=\n \"\"\"Here we go with a pretty\n big block of text\n surrounded by triple quoted strings\n \"\"\"\n )), Ast.Step(line=6, title=Ast.Text(line=6, text=\n 'And a cat picture'), text=Ast.Text(line=7, text=\n \"\"\"Now notice we didn't use (:) above\n \"\"\"))])\n\n\ndef test_parse_text():\n parser = Parser([(1, gherkin.TOKEN_TAG, 'tag1'), (1, gherkin.TOKEN_TAG,\n 'tag2'), (1, gherkin.TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_TAG,\n 'tag3'), (2, gherkin.TOKEN_NEWLINE, '\\n'), (3, gherkin.TOKEN_LABEL,\n 'Feature')])\n tags = parser.parse_tags()\n tags.should.equal(['tag1', 'tag2', 'tag3'])\n\n\ndef test_parse_tags_on_scenario_outline_examples():\n \"\"\"Parser should allow tags to be defined in examples\"\"\"\n parser = Parser([(1, gherkin.TOKEN_TAG, 'tagged-feature'), (1, gherkin.\n TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_LABEL, 'Feature'), (2,\n gherkin.TOKEN_TEXT, 'Parse tags'), (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_TAG, 'tag1'), (3, gherkin.TOKEN_TAG, 'tag2'), (3,\n gherkin.TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_LABEL,\n 'Scenario Outline'), (4, gherkin.TOKEN_TEXT, 'Test'), (4, gherkin.\n TOKEN_NEWLINE, '\\n'), (5, gherkin.TOKEN_TAG, 'example-tag1'), (5,\n gherkin.TOKEN_NEWLINE, '\\n'), (6, gherkin.TOKEN_TAG, 'example-tag2'\n ), (6, gherkin.TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_LABEL,\n 'Examples'), (7, gherkin.TOKEN_NEWLINE, '\\n'), (8, gherkin.\n TOKEN_TABLE_COLUMN, 'Header'), (8, gherkin.TOKEN_NEWLINE, '\\n'), (9,\n gherkin.TOKEN_EOF, '')])\n feature = parser.parse_feature()\n feature.should.equal(Ast.Feature(line=2, title=Ast.Text(line=2, text=\n 'Parse tags'), tags=['tagged-feature'], scenarios=[Ast.\n ScenarioOutline(line=4, title=Ast.Text(line=4, text='Test'), tags=[\n 'tag1', 'tag2'], examples=Ast.Examples(line=7, tags=['example-tag1',\n 'example-tag2'], table=Ast.Table(line=8, fields=[['Header']])))]))\n\n\ndef test_parse_tags_on_feature_and_scenario():\n parser = Parser([(1, gherkin.TOKEN_TAG, 'tagged-feature'), (1, gherkin.\n TOKEN_NEWLINE, '\\n'), (2, gherkin.TOKEN_LABEL, 'Feature'), (2,\n gherkin.TOKEN_TEXT, 'Parse tags'), (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_NEWLINE, '\\n'), (4, gherkin.TOKEN_TAG, 'tag1'), (\n 4, gherkin.TOKEN_TAG, 'tag2'), (4, gherkin.TOKEN_NEWLINE, '\\n'), (5,\n gherkin.TOKEN_LABEL, 'Scenario'), (5, gherkin.TOKEN_TEXT, 'Test'),\n (6, gherkin.TOKEN_NEWLINE, '\\n'), (7, gherkin.TOKEN_EOF, '')])\n feature = parser.parse_feature()\n feature.should.equal(Ast.Feature(line=2, title=Ast.Text(line=2, 
text=\n 'Parse tags'), tags=['tagged-feature'], scenarios=[Ast.Scenario(\n line=5, title=Ast.Text(line=5, text='Test'), tags=['tag1', 'tag2'])]))\n\n\ndef test_ast_node_equal():\n n1 = Ast.Node()\n n2 = Ast.Node()\n n1.name = 'Lincoln'\n n2.color = 'green'\n equal = n1 == n2\n equal.should.be.false\n",
"step-5": "# -*- coding: utf-8; -*-\n\nimport gherkin\nfrom gherkin import Lexer, Parser, Ast\n\n\ndef test_lex_test_eof():\n \"lex_text() Should be able to find EOF\"\n\n # Given a lexer that takes '' as the input string\n lexer = gherkin.Lexer('')\n\n # When we try to lex any text from ''\n new_state = lexer.lex_text()\n\n # Then we see we've got to EOF and that new state is nil\n lexer.tokens.should.equal([(1, gherkin.TOKEN_EOF, '')])\n new_state.should.be.none\n\n\ndef test_lex_text():\n \"lex_text() Should be able to find text before EOF\"\n\n # Given a lexer that takes some text as input string\n lexer = gherkin.Lexer('some text')\n\n # When we lex it\n new_state = lexer.lex_text()\n\n # Then we see we found both the text and the EOF token\n lexer.tokens.should.equal([\n (1, gherkin.TOKEN_TEXT, 'some text'),\n (1, gherkin.TOKEN_EOF, '')\n ])\n\n # And the new state is nil\n new_state.should.be.none\n\n\ndef test_lex_hash_with_text():\n \"lex_text() Should stop lexing at # (we found a comment!)\"\n\n # Given a lexer with some text and some comment\n lexer = gherkin.Lexer(' some text # random comment')\n\n # When the input is lexed through the text lexer\n new_state = lexer.lex_text()\n\n # Then we see the following token on the output list\n lexer.tokens.should.equal([\n (1, gherkin.TOKEN_TEXT, 'some text '),\n ])\n\n # And that the next state will lex comments\n new_state.should.equal(lexer.lex_comment)\n\n\ndef test_lex_comment():\n \"lex_comment() Should stop lexing at \\\\n\"\n\n # Given a lexer loaded with some comments\n lexer = gherkin.Lexer(' random comment')\n\n # When We lex the input text\n new_state = lexer.lex_comment()\n\n # Then we see the comment above was captured\n lexer.tokens.should.equal([\n (1, gherkin.TOKEN_COMMENT, 'random comment'),\n ])\n\n # And that new state is lex_text()\n new_state.should.equal(lexer.lex_text)\n\n\ndef test_lex_comment_meta_label():\n \"lex_comment() Should stop lexing at : (we found a label)\"\n\n # Given a lexer loaded with a comment that contains a label\n lexer = gherkin.Lexer(' metadata: test')\n\n # When we lex the comment\n new_state = lexer.lex_comment()\n\n # Then we see that a label was found\n lexer.tokens.should.equal([\n (1, gherkin.TOKEN_META_LABEL, 'metadata'),\n ])\n\n # And that new state is going to read the value of the variable we\n # just found\n new_state.should.equal(lexer.lex_comment_metadata_value)\n\n\ndef test_lex_comment_metadata_value():\n \"lex_comment_metadata_value() Should stop lexing at \\n\"\n\n # Given a lexer loaded with the value of a label and a new line\n # with more text\n lexer = gherkin.Lexer(' test value\\nblah')\n\n # When we lex the input string\n new_state = lexer.lex_comment_metadata_value()\n\n # Then we see that only the value present is the one before the\n # \\n, everything else will be lexed by lex_text\n lexer.tokens.should.equal([\n (1, gherkin.TOKEN_META_VALUE, 'test value'),\n ])\n\n # And we also see that the next\n new_state.should.equal(lexer.lex_text)\n\ndef test_lex_comment_no_newline():\n\n # Given a lexer loaded with a comment without the newline marker\n lexer = gherkin.Lexer(' test comment')\n\n # When we lex the input string\n new_state = lexer.lex_comment_metadata_value()\n\n # Then we see the whole line was captured\n lexer.tokens.should.equal([\n (1, gherkin.TOKEN_META_VALUE, 'test comment'),\n ])\n\n # And we also see that the next\n new_state.should.equal(lexer.lex_text)\n\n\ndef test_lex_comment_until_newline():\n \"Lexer.lex_comment() Should parse comments until 
the newline character\"\n\n # Given a lexer loaded with comments containing a metadata field\n lexer = gherkin.Lexer('# one line\\n# another line')\n\n # When I run the lexer\n tokens = lexer.run()\n\n # Then we see both lines were captured\n lexer.tokens.should.equal([\n (1, gherkin.TOKEN_COMMENT, 'one line'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_COMMENT, 'another line'),\n (2, gherkin.TOKEN_EOF, ''),\n ])\n\n\ndef test_lex_comment_full():\n \"Lexer.run() Should be able to process metadata in comments\"\n\n # Given a lexer loaded with comments containing a metadata field\n lexer = gherkin.Lexer('some text # metadata-field: blah-value\\ntext')\n\n # When I run the lexer\n tokens = lexer.run()\n\n # Then I see the tokens collected match some text, a field, more\n # text and EOF\n tokens.should.equal([\n (1, gherkin.TOKEN_TEXT, 'some text '),\n (1, gherkin.TOKEN_META_LABEL, 'metadata-field'),\n (1, gherkin.TOKEN_META_VALUE, 'blah-value'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_TEXT, 'text'),\n (2, gherkin.TOKEN_EOF, '')\n ])\n\n\ndef test_lex_text_with_label():\n \"Lexer.run() Should be able to parse a label with some text\"\n\n # Given a lexer loaded with a feature\n lexer = gherkin.Lexer(\n 'Feature: A cool feature\\n some more text\\n even more text')\n\n # When we run the lexer\n tokens = lexer.run()\n\n # Then we see the token list matches the label, text, text EOF\n # sequence\n tokens.should.equal([\n (1, gherkin.TOKEN_LABEL, 'Feature'),\n (1, gherkin.TOKEN_TEXT, 'A cool feature'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_TEXT, 'some more text'),\n (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_TEXT, 'even more text'),\n (3, gherkin.TOKEN_EOF, '')\n ])\n\n\ndef test_lex_text_with_labels():\n \"Lexer.run() Should be able to tokenize a feature with a scenario\"\n\n # Given a lexer with a more complete feature+scenario\n lexer = gherkin.Lexer('''\n\nFeature: Some descriptive text\n In order to parse a Gherkin file\n As a parser\n I want to be able to parse scenarios\n\n Even more text\n\n Scenario: The user wants to describe a feature\n''')\n\n # When we run the lexer\n tokens = lexer.run()\n\n # Then we see it was broken down into the right list of tokens\n tokens.should.equal([\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_LABEL, 'Feature'),\n (3, gherkin.TOKEN_TEXT, 'Some descriptive text'),\n (3, gherkin.TOKEN_NEWLINE, '\\n'),\n (4, gherkin.TOKEN_TEXT, 'In order to parse a Gherkin file'),\n (4, gherkin.TOKEN_NEWLINE, '\\n'),\n (5, gherkin.TOKEN_TEXT, 'As a parser'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'),\n (6, gherkin.TOKEN_TEXT, 'I want to be able to parse scenarios'),\n (6, gherkin.TOKEN_NEWLINE, '\\n'),\n (7, gherkin.TOKEN_NEWLINE, '\\n'),\n (8, gherkin.TOKEN_TEXT, 'Even more text'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'),\n (9, gherkin.TOKEN_NEWLINE, '\\n'),\n (10, gherkin.TOKEN_LABEL, 'Scenario'),\n (10, gherkin.TOKEN_TEXT, 'The user wants to describe a feature'),\n (10, gherkin.TOKEN_NEWLINE, '\\n'),\n (11, gherkin.TOKEN_EOF, '')\n ])\n\n\ndef test_lex_text_with_steps():\n \"Lexer.run() Should be able to tokenize steps\"\n\n # Given a lexer loaded with feature+background+scenario+steps\n lexer = gherkin.Lexer('''\\\nFeature: Feature title\n feature description\n Background: Some background\n about the problem\n Scenario: Scenario title\n Given first step\n When second step\n Then third step\n''')\n\n # When we run the lexer\n tokens = lexer.run()\n\n # Then we see that 
everything, including the steps was properly\n # tokenized\n tokens.should.equal([\n (1, gherkin.TOKEN_LABEL, 'Feature'),\n (1, gherkin.TOKEN_TEXT, 'Feature title'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_TEXT, 'feature description'),\n (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_LABEL, 'Background'),\n (3, gherkin.TOKEN_TEXT, 'Some background'),\n (3, gherkin.TOKEN_NEWLINE, '\\n'),\n (4, gherkin.TOKEN_TEXT, 'about the problem'),\n (4, gherkin.TOKEN_NEWLINE, '\\n'),\n (5, gherkin.TOKEN_LABEL, 'Scenario'),\n (5, gherkin.TOKEN_TEXT, 'Scenario title'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'),\n (6, gherkin.TOKEN_TEXT, 'Given first step'),\n (6, gherkin.TOKEN_NEWLINE, '\\n'),\n (7, gherkin.TOKEN_TEXT, 'When second step'),\n (7, gherkin.TOKEN_NEWLINE, '\\n'),\n (8, gherkin.TOKEN_TEXT, 'Then third step'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'),\n (9, gherkin.TOKEN_EOF, '')\n ])\n\n\ndef test_lex_load_languages():\n \"Lexer.run() Should be able to parse different languages\"\n\n # Given the following lexer instance loaded with another language\n lexer = gherkin.Lexer('''# language: pt-br\n\n Funcionalidade: Interpretador para gherkin\n Para escrever testes de aceitação\n Como um programador\n Preciso de uma ferramenta de BDD\n Contexto:\n Dado que a variavel \"X\" contém o número 2\n Cenário: Lanche\n Dada uma maçã\n Quando mordida\n Então a fome passa\n ''')\n\n # When we run the lexer\n tokens = lexer.run()\n\n # Then the following list of tokens is generated\n tokens.should.equal([\n (1, gherkin.TOKEN_META_LABEL, 'language'),\n (1, gherkin.TOKEN_META_VALUE, 'pt-br'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_LABEL, 'Funcionalidade'),\n (3, gherkin.TOKEN_TEXT, 'Interpretador para gherkin'),\n (3, gherkin.TOKEN_NEWLINE, '\\n'),\n (4, gherkin.TOKEN_TEXT, 'Para escrever testes de aceitação'),\n (4, gherkin.TOKEN_NEWLINE, '\\n'),\n (5, gherkin.TOKEN_TEXT, 'Como um programador'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'),\n (6, gherkin.TOKEN_TEXT, 'Preciso de uma ferramenta de BDD'),\n (6, gherkin.TOKEN_NEWLINE, '\\n'),\n (7, gherkin.TOKEN_LABEL, 'Contexto'),\n (7, gherkin.TOKEN_NEWLINE, '\\n'),\n (8, gherkin.TOKEN_TEXT, 'Dado que a variavel \"X\" contém o número 2'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'),\n (9, gherkin.TOKEN_LABEL, 'Cenário'),\n (9, gherkin.TOKEN_TEXT, 'Lanche'),\n (9, gherkin.TOKEN_NEWLINE, '\\n'),\n (10, gherkin.TOKEN_TEXT, 'Dada uma maçã'),\n (10, gherkin.TOKEN_NEWLINE, '\\n'),\n (11, gherkin.TOKEN_TEXT, 'Quando mordida'),\n (11, gherkin.TOKEN_NEWLINE, '\\n'),\n (12, gherkin.TOKEN_TEXT, 'Então a fome passa'),\n (12, gherkin.TOKEN_NEWLINE, '\\n'),\n (13, gherkin.TOKEN_EOF, '')\n ])\n\n\ndef test_lex_tables():\n \"Lexer.run() Should be able to lex tables\"\n\n # Given the following lexer loaded with an examples label followed\n # by a table that ends before '\\n'\n lexer = gherkin.Lexer('''\\\n Examples:\n | column1 | column2 | ''')\n\n # When we run the lexer\n tokens = lexer.run()\n\n # Then we see the scenario outline case was properly parsed\n tokens.should.equal([\n (1, gherkin.TOKEN_LABEL, 'Examples'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_TABLE_COLUMN, 'column1'),\n (2, gherkin.TOKEN_TABLE_COLUMN, 'column2'),\n (2, gherkin.TOKEN_EOF, ''),\n ])\n\n\ndef test_lex_tables_full():\n \"Lexer.run() Should be able to lex scenario outlines\"\n\n lexer = gherkin.Lexer('''\\\n Feature: gherkin has steps with examples\n Scenario Outline: Add two numbers\n Given I have <input_1> and <input_2> the 
calculator\n When I press \"Sum\"!\n Then the result should be <output> on the screen\n Examples:\n | input_1 | input_2 | output |\n | 20 | 30 | 50 |\n | 0 | 40 | 40 |\n''')\n\n # When we run the lexer\n tokens = lexer.run()\n\n # Then we see the scenario outline case was properly parsed\n tokens.should.equal([\n (1, gherkin.TOKEN_LABEL, 'Feature'),\n (1, gherkin.TOKEN_TEXT, 'gherkin has steps with examples'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_LABEL, 'Scenario Outline'),\n (2, gherkin.TOKEN_TEXT, 'Add two numbers'),\n (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_TEXT, 'Given I have <input_1> and <input_2> the calculator'),\n (3, gherkin.TOKEN_NEWLINE, '\\n'),\n (4, gherkin.TOKEN_TEXT, 'When I press \"Sum\"!'),\n (4, gherkin.TOKEN_NEWLINE, '\\n'),\n (5, gherkin.TOKEN_TEXT, 'Then the result should be <output> on the screen'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'),\n (6, gherkin.TOKEN_LABEL, 'Examples'),\n (6, gherkin.TOKEN_NEWLINE, '\\n'),\n (7, gherkin.TOKEN_TABLE_COLUMN, 'input_1'),\n (7, gherkin.TOKEN_TABLE_COLUMN, 'input_2'),\n (7, gherkin.TOKEN_TABLE_COLUMN, 'output'),\n (7, gherkin.TOKEN_NEWLINE, '\\n'),\n (8, gherkin.TOKEN_TABLE_COLUMN, '20'),\n (8, gherkin.TOKEN_TABLE_COLUMN, '30'),\n (8, gherkin.TOKEN_TABLE_COLUMN, '50'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'),\n (9, gherkin.TOKEN_TABLE_COLUMN, '0'),\n (9, gherkin.TOKEN_TABLE_COLUMN, '40'),\n (9, gherkin.TOKEN_TABLE_COLUMN, '40'),\n (9, gherkin.TOKEN_NEWLINE, '\\n'),\n (10, gherkin.TOKEN_EOF, '')\n ])\n\n\ndef test_lex_tables_within_steps():\n \"Lexer.run() Should be able to lex example tables from steps\"\n\n # Given a lexer loaded with steps that contain example tables\n lexer = gherkin.Lexer('''\\\n\tFeature: Check models existence\n\t\tBackground:\n\t Given I have a garden in the database:\n\t | @name | area | raining |\n\t | Secret Garden | 45 | false |\n\t And I have gardens in the database:\n\t | name | area | raining |\n\t | Octopus' Garden | 120 | true |\n ''')\n\n # When we run the lexer\n tokens = lexer.run()\n\n # Then we see that steps that contain : will be identified as\n # labels\n tokens.should.equal([\n (1, gherkin.TOKEN_LABEL, 'Feature'),\n (1, gherkin.TOKEN_TEXT, 'Check models existence'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_LABEL, 'Background'),\n (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_LABEL, 'Given I have a garden in the database'),\n (3, gherkin.TOKEN_NEWLINE, '\\n'),\n (4, gherkin.TOKEN_TABLE_COLUMN, '@name'),\n (4, gherkin.TOKEN_TABLE_COLUMN, 'area'),\n (4, gherkin.TOKEN_TABLE_COLUMN, 'raining'),\n (4, gherkin.TOKEN_NEWLINE, '\\n'),\n (5, gherkin.TOKEN_TABLE_COLUMN, 'Secret Garden'),\n (5, gherkin.TOKEN_TABLE_COLUMN, '45'),\n (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'),\n (6, gherkin.TOKEN_LABEL, 'And I have gardens in the database'),\n (6, gherkin.TOKEN_NEWLINE, '\\n'),\n (7, gherkin.TOKEN_TABLE_COLUMN, 'name'),\n (7, gherkin.TOKEN_TABLE_COLUMN, 'area'),\n (7, gherkin.TOKEN_TABLE_COLUMN, 'raining'),\n (7, gherkin.TOKEN_NEWLINE, '\\n'),\n (8, gherkin.TOKEN_TABLE_COLUMN, 'Octopus\\' Garden'),\n (8, gherkin.TOKEN_TABLE_COLUMN, '120'),\n (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'),\n (9, gherkin.TOKEN_EOF, '')\n ])\n\n\ndef test_lex_multi_line_str():\n \"Lexer.run() Should be able to find multi quoted strings after labels\"\n\n # Given a lexer loaded with steps that contain example tables\n lexer = gherkin.Lexer('''\\\n Given the following email template:\n ''\\'Here we go with a 
pretty\n big block of text\n surrounded by triple quoted strings\n ''\\'\n And a cat picture\n \"\"\"Now notice we didn't use (:) above\n \"\"\"\n ''')\n\n # When we run the lexer\n tokens = lexer.run()\n\n # Then we see that triple quoted strings are captured by the lexer\n tokens.should.equal([\n (1, gherkin.TOKEN_LABEL, 'Given the following email template'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_QUOTES, \"'''\"),\n (2, gherkin.TOKEN_TEXT, '''Here we go with a pretty\n big block of text\n surrounded by triple quoted strings\n '''),\n (5, gherkin.TOKEN_QUOTES, \"'''\"),\n (5, gherkin.TOKEN_NEWLINE, '\\n'),\n (6, gherkin.TOKEN_TEXT, 'And a cat picture'),\n (6, gherkin.TOKEN_NEWLINE, '\\n'),\n (7, gherkin.TOKEN_QUOTES, '\"\"\"'),\n (7, gherkin.TOKEN_TEXT, \"Now notice we didn't use (:) above\\n \"),\n (8, gherkin.TOKEN_QUOTES, '\"\"\"'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'),\n (9, gherkin.TOKEN_EOF, '')\n ])\n\ndef test_lex_tags_empty():\n \"Lexer.lex_tag() Should bail if we reach EOF\"\n\n # Given a lexer loaded with an empty string\n lexer = gherkin.Lexer('')\n\n # When we try to lex tags\n lexer.lex_tag()\n\n # Then we see we found no tokens\n lexer.tokens.should.be.empty\n\n\ndef test_lex_tags():\n \"Lexer.run() Should be able to find tags\"\n\n # Given a lexer loaded with steps that contain example tables\n lexer = gherkin.Lexer('''\\\n @tagged-feature\n Feature: Parse tags\n\n @tag1 @tag2\n Scenario: Test\n ''')\n\n # When we run the lexer\n tokens = lexer.run()\n\n # Then we see that triple quoted strings are captured by the lexer\n tokens.should.equal([\n (1, gherkin.TOKEN_TAG, 'tagged-feature'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_LABEL, 'Feature'),\n (2, gherkin.TOKEN_TEXT, 'Parse tags'),\n (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_NEWLINE, '\\n'),\n (4, gherkin.TOKEN_TAG, 'tag1'),\n (4, gherkin.TOKEN_TAG, 'tag2'),\n (4, gherkin.TOKEN_NEWLINE, '\\n'),\n (5, gherkin.TOKEN_LABEL, 'Scenario'),\n (5, gherkin.TOKEN_TEXT, 'Test'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'),\n (6, gherkin.TOKEN_EOF, ''),\n ])\n\n\ndef test_parse_metadata_empty():\n\n Parser([(1, gherkin.TOKEN_EOF, '')]).parse_metadata().should.be.none\n\n Parser([None]).parse_metadata().should.be.none\n\n\ndef test_parse_metadata_incomplete():\n\n parser = Parser([\n (1, gherkin.TOKEN_META_LABEL, 'language'),\n (1, gherkin.TOKEN_EOF, ''),\n ])\n\n parser.parse_metadata().should.be.none\n\n\ndef test_parse_metadata_syntax_error():\n\n parser = Parser([\n (1, gherkin.TOKEN_META_LABEL, 'language'),\n (1, gherkin.TOKEN_TEXT, 'pt-br'),\n ])\n\n parser.parse_metadata.when.called.should.throw(\n SyntaxError, 'No value found for the meta-field `language\\'')\n\n\ndef test_parse_metadata():\n\n parser = Parser([\n (1, gherkin.TOKEN_META_LABEL, 'language'),\n (1, gherkin.TOKEN_META_VALUE, 'pt-br'),\n ])\n\n metadata = parser.parse_metadata()\n\n metadata.should.equal(Ast.Metadata(line=1, key='language', value='pt-br'))\n\n\ndef test_parse_empty_title():\n\n parser = Parser([\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_TEXT, 'more text after title'),\n ])\n\n feature = parser.parse_title()\n\n feature.should.be.none\n\n\ndef test_parse_title():\n\n parser = Parser([\n (1, gherkin.TOKEN_TEXT, 'Scenario title'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n ])\n\n feature = parser.parse_title()\n\n feature.should.equal(Ast.Text(line=1, text='Scenario title'))\n\n\ndef test_parse_table():\n\n parser = Parser([\n (1, gherkin.TOKEN_TABLE_COLUMN, 'name'),\n (1, 
gherkin.TOKEN_TABLE_COLUMN, 'email'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'),\n (2, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'),\n (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'),\n (3, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'),\n (3, gherkin.TOKEN_NEWLINE, '\\n'),\n (4, gherkin.TOKEN_LABEL, 'Scenario'),\n (4, gherkin.TOKEN_EOF, ''),\n ])\n\n feature = parser.parse_table()\n\n feature.should.equal(Ast.Table(line=1, fields=[\n ['name', 'email'],\n ['Lincoln', '[email protected]'],\n ['Gabriel', '[email protected]'],\n ]))\n\n\ndef test_parse_background():\n\n # Background: title\n # Given two users in the database:\n # | name | email |\n # | Lincoln | [email protected] |\n # | Gabriel | [email protected] |\n # Scenario:\n parser = Parser([\n (1, gherkin.TOKEN_LABEL, 'Background'),\n (1, gherkin.TOKEN_TEXT, 'title'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_LABEL, 'Given two users in the database'),\n (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_TABLE_COLUMN, 'name'),\n (3, gherkin.TOKEN_TABLE_COLUMN, 'email'),\n (3, gherkin.TOKEN_NEWLINE, '\\n'),\n (4, gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'),\n (4, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'),\n (4, gherkin.TOKEN_NEWLINE, '\\n'),\n (5, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'),\n (5, gherkin.TOKEN_TABLE_COLUMN, '[email protected]'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'),\n (6, gherkin.TOKEN_LABEL, 'Scenario'),\n ])\n\n # When the background is parsed\n feature = parser.parse_background()\n\n # Then I see the output contains a valid background with a step\n # with examples. Notice the scenario label is not returned\n # anywhere here\n feature.should.equal(Ast.Background(\n line=1,\n title=Ast.Text(line=1, text='title'),\n steps=[\n Ast.Step(\n line=2,\n title=Ast.Text(line=2, text='Given two users in the database'),\n table=Ast.Table(line=3, fields=[\n ['name', 'email'],\n ['Lincoln', '[email protected]'],\n ['Gabriel', '[email protected]'],\n ]))\n ]))\n\n\n## Scenarios\n\n\ndef teste_parse_scenario():\n\n parser = Parser([\n (1, gherkin.TOKEN_LABEL, 'Scenario'),\n (1, gherkin.TOKEN_TEXT, 'Scenario title'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_TEXT, 'Given first step'),\n ])\n\n feature = parser.parse_scenarios()\n\n feature.should.equal([Ast.Scenario(\n line=1,\n title=Ast.Text(line=1, text='Scenario title'),\n steps=[Ast.Step(line=2, title=Ast.Text(line=2, text='Given first step'))],\n )])\n\n\ndef teste_parse_scenario_with_description():\n\n parser = Parser([\n (1, gherkin.TOKEN_LABEL, 'Scenario'),\n (1, gherkin.TOKEN_TEXT, 'Scenario title'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_TEXT, 'Scenario description'),\n (2, gherkin.TOKEN_TEXT, 'More description'),\n (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_TEXT, 'Given first step'),\n ])\n\n feature = parser.parse_scenarios()\n\n feature.should.equal([Ast.Scenario(\n line=1,\n title=Ast.Text(line=1, text='Scenario title'),\n description=Ast.Text( line=2, text='Scenario description More description'),\n steps=[Ast.Step(line=3, title=Ast.Text(line=3, text='Given first step'))],\n )])\n\n\ndef test_parse_scenario_outline_with_examples():\n \"\"\n\n # Given a parser loaded with the following gherkin document:\n #\n # Scenario Outline: Plant a tree\n # Given the <name> of a garden\n # When I plant a tree\n # And wait for <num_days> days\n # Then I see it growing\n # Examples:\n # | name | num_days |\n # | Secret | 2 |\n # | Octopus | 5 |\n 
parser = Parser([\n (1, gherkin.TOKEN_LABEL, 'Scenario Outline'),\n (1, gherkin.TOKEN_TEXT, 'Plant a tree'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_TEXT, 'Given the <name> of a garden'),\n (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_TEXT, 'When I plant a tree'),\n (3, gherkin.TOKEN_NEWLINE, '\\n'),\n (4, gherkin.TOKEN_TEXT, 'And wait for <num_days> days'),\n (4, gherkin.TOKEN_NEWLINE, '\\n'),\n (5, gherkin.TOKEN_TEXT, 'Then I see it growing'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'),\n (6, gherkin.TOKEN_LABEL, 'Examples'),\n (6, gherkin.TOKEN_NEWLINE, '\\n'),\n (7, gherkin.TOKEN_TABLE_COLUMN, 'name'),\n (7, gherkin.TOKEN_TABLE_COLUMN, 'num_days'),\n (7, gherkin.TOKEN_NEWLINE, '\\n'),\n (8, gherkin.TOKEN_TABLE_COLUMN, 'Secret'),\n (8, gherkin.TOKEN_TABLE_COLUMN, '2'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'),\n (9, gherkin.TOKEN_TABLE_COLUMN, 'Octopus'),\n (9, gherkin.TOKEN_TABLE_COLUMN, '5'),\n (9, gherkin.TOKEN_NEWLINE, '\\n'),\n (10, gherkin.TOKEN_EOF, '')\n ])\n\n scenarios = parser.parse_scenarios()\n\n scenarios.should.equal([\n Ast.ScenarioOutline(\n line=1,\n title=Ast.Text(line=1, text='Plant a tree'),\n steps=[Ast.Step(line=2, title=Ast.Text(line=2, text='Given the <name> of a garden')),\n Ast.Step(line=3, title=Ast.Text(line=3, text='When I plant a tree')),\n Ast.Step(line=4, title=Ast.Text(line=4, text='And wait for <num_days> days')),\n Ast.Step(line=5, title=Ast.Text(line=5, text='Then I see it growing'))],\n examples=Ast.Examples(line=6, table=Ast.Table(line=7, fields=[\n ['name', 'num_days'],\n ['Secret', '2'],\n ['Octopus', '5'],\n ]))\n )])\n\n\ndef test_parse_not_starting_with_feature():\n\n parser = gherkin.Parser(gherkin.Lexer('''\nScenario: Scenario title\n Given first step\n When second step\n Then third step\n ''').run())\n\n parser.parse_feature.when.called.should.throw(\n SyntaxError,\n \"Feature expected in the beginning of the file, \"\n \"found `Scenario' though.\")\n\n\ndef test_parse_feature_two_backgrounds():\n\n parser = gherkin.Parser(gherkin.Lexer('''\nFeature: Feature title\n feature description\n Background: Some background\n about the problem\n Background: Some other background\n will raise an exception\n Scenario: Scenario title\n Given first step\n When second step\n Then third step\n ''').run())\n\n parser.parse_feature.when.called.should.throw(\n SyntaxError,\n \"`Background' should not be declared here, Scenario or Scenario Outline expected\")\n\n\ndef test_parse_feature_background_wrong_place():\n\n parser = gherkin.Parser(gherkin.Lexer('''\nFeature: Feature title\n feature description\n Scenario: Scenario title\n Given first step\n When second step\n Then third step\n Background: Some background\n about the problem\n ''').run())\n\n parser.parse_feature.when.called.should.throw(\n SyntaxError,\n \"`Background' should not be declared here, Scenario or Scenario Outline expected\")\n\n\ndef test_parse_feature():\n\n parser = Parser([\n (1, gherkin.TOKEN_LABEL, 'Feature'),\n (1, gherkin.TOKEN_TEXT, 'Feature title'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_TEXT, 'feature description'),\n (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_LABEL, 'Background'),\n (3, gherkin.TOKEN_TEXT, 'Some background'),\n (3, gherkin.TOKEN_NEWLINE, '\\n'),\n (4, gherkin.TOKEN_TEXT, 'Given the problem'),\n (4, gherkin.TOKEN_NEWLINE, '\\n'),\n (5, gherkin.TOKEN_LABEL, 'Scenario'),\n (5, gherkin.TOKEN_TEXT, 'Scenario title'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'),\n (6, gherkin.TOKEN_TEXT, 'Given first step'),\n (6, 
gherkin.TOKEN_NEWLINE, '\\n'),\n (7, gherkin.TOKEN_LABEL, 'Scenario'),\n (7, gherkin.TOKEN_TEXT, 'Another scenario'),\n (7, gherkin.TOKEN_NEWLINE, '\\n'),\n (8, gherkin.TOKEN_TEXT, 'Given this step'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'),\n (9, gherkin.TOKEN_TEXT, 'When we take another step'),\n (9, gherkin.TOKEN_NEWLINE, '\\n'),\n (10, gherkin.TOKEN_EOF, ''),\n ])\n\n feature = parser.parse_feature()\n\n feature.should.equal(Ast.Feature(\n line=1,\n title=Ast.Text(line=1, text='Feature title'),\n description=Ast.Text(line=2, text='feature description'),\n background=Ast.Background(\n line=3,\n title=Ast.Text(line=3, text='Some background'),\n steps=[Ast.Step(line=4, title=Ast.Text(line=4, text='Given the problem'))]),\n scenarios=[\n Ast.Scenario(line=5,\n title=Ast.Text(line=5, text='Scenario title'),\n steps=[Ast.Step(line=6, title=Ast.Text(line=6, text='Given first step'))]),\n Ast.Scenario(line=7,\n title=Ast.Text(line=7, text='Another scenario'),\n steps=[Ast.Step(line=8, title=Ast.Text(line=8, text='Given this step')),\n Ast.Step(line=9, title=Ast.Text(line=9, text='When we take another step'))]),\n ],\n ))\n\n\ndef test_parse_tables_within_steps():\n \"Lexer.run() Should be able to parse example tables from steps\"\n\n # Given a parser loaded with steps that contain example tables\n '''Feature: Check models existence\n\t\tBackground:\n\t Given I have a garden in the database:\n\t | @name | area | raining |\n\t | Secret Garden | 45 | false |\n\t And I have gardens in the database:\n\t | name | area | raining |\n\t | Octopus' Garden | 120 | true |\n Scenario: Plant a tree\n Given the <name> of a garden\n When I plant a tree\n And wait for <num_days> days\n Then I see it growing\n '''\n parser = Parser([\n (1, gherkin.TOKEN_LABEL, 'Feature'),\n (1, gherkin.TOKEN_TEXT, 'Check models existence'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_LABEL, 'Background'),\n (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_LABEL, 'Given I have a garden in the database'),\n (3, gherkin.TOKEN_NEWLINE, '\\n'),\n (4, gherkin.TOKEN_TABLE_COLUMN, '@name'),\n (4, gherkin.TOKEN_TABLE_COLUMN, 'area'),\n (4, gherkin.TOKEN_TABLE_COLUMN, 'raining'),\n (4, gherkin.TOKEN_NEWLINE, '\\n'),\n (5, gherkin.TOKEN_TABLE_COLUMN, 'Secret Garden'),\n (5, gherkin.TOKEN_TABLE_COLUMN, '45'),\n (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'),\n (6, gherkin.TOKEN_LABEL, 'And I have gardens in the database'),\n (6, gherkin.TOKEN_NEWLINE, '\\n'),\n (7, gherkin.TOKEN_TABLE_COLUMN, 'name'),\n (7, gherkin.TOKEN_TABLE_COLUMN, 'area'),\n (7, gherkin.TOKEN_TABLE_COLUMN, 'raining'),\n (7, gherkin.TOKEN_NEWLINE, '\\n'),\n (8, gherkin.TOKEN_TABLE_COLUMN, \"Octopus' Garden\"),\n (8, gherkin.TOKEN_TABLE_COLUMN, '120'),\n (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'),\n (9, gherkin.TOKEN_LABEL, 'Scenario'),\n (9, gherkin.TOKEN_TEXT, 'Plant a tree'),\n (9, gherkin.TOKEN_NEWLINE, '\\n'),\n (10, gherkin.TOKEN_TEXT, 'Given the <name> of a garden'),\n (10, gherkin.TOKEN_NEWLINE, '\\n'),\n (11, gherkin.TOKEN_TEXT, 'When I plant a tree'),\n (11, gherkin.TOKEN_NEWLINE, '\\n'),\n (12, gherkin.TOKEN_TEXT, 'And wait for <num_days> days'),\n (12, gherkin.TOKEN_NEWLINE, '\\n'),\n (13, gherkin.TOKEN_TEXT, 'Then I see it growing'),\n (13, gherkin.TOKEN_NEWLINE, '\\n'),\n (14, gherkin.TOKEN_EOF, '')\n ])\n\n feature = parser.parse_feature()\n\n feature.should.equal(Ast.Feature(\n line=1,\n title=Ast.Text(line=1, text='Check models existence'),\n background=Ast.Background(\n 
line=2,\n steps=[\n Ast.Step(\n line=3,\n title=Ast.Text(line=3, text='Given I have a garden in the database'),\n table=Ast.Table(line=4, fields=[\n ['@name', 'area', 'raining'],\n ['Secret Garden', '45', 'false']])),\n Ast.Step(\n line=6,\n title=Ast.Text(line=6, text='And I have gardens in the database'),\n table=Ast.Table(line=7, fields=[\n ['name', 'area', 'raining'],\n ['Octopus\\' Garden', '120', 'true']])),\n ]\n ),\n scenarios=[\n Ast.Scenario(\n title=Ast.Text(line=9, text='Plant a tree'),\n line=9,\n steps=[\n Ast.Step(line=10, title=Ast.Text(line=10, text='Given the <name> of a garden')),\n Ast.Step(line=11, title=Ast.Text(line=11, text='When I plant a tree')),\n Ast.Step(line=12, title=Ast.Text(line=12, text='And wait for <num_days> days')),\n Ast.Step(line=13, title=Ast.Text(line=13, text='Then I see it growing'))\n ])\n ],\n ))\n\n\ndef test_parse_quoted_strings_on_steps():\n\n # Given a parser loaded with the following Gherkin document\n # Given the following email template:\n # '''Here we go with a pretty\n # big block of text\n # surrounded by triple quoted strings\n # '''\n # And a cat picture\n # \"\"\"Now notice we didn't use (:) above\n # \"\"\"\n parser = Parser([\n (1, gherkin.TOKEN_LABEL, 'Given the following email template'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_QUOTES, \"'''\"),\n (2, gherkin.TOKEN_TEXT, '''Here we go with a pretty\n big block of text\n surrounded by triple quoted strings\n '''),\n (5, gherkin.TOKEN_QUOTES, \"'''\"),\n (5, gherkin.TOKEN_NEWLINE, '\\n'),\n (6, gherkin.TOKEN_TEXT, 'And a cat picture'),\n (6, gherkin.TOKEN_NEWLINE, '\\n'),\n (7, gherkin.TOKEN_QUOTES, '\"\"\"'),\n (7, gherkin.TOKEN_TEXT, \"Now notice we didn't use (:) above\\n \"),\n (8, gherkin.TOKEN_QUOTES, '\"\"\"'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'),\n (9, gherkin.TOKEN_EOF, '')\n ])\n\n steps = parser.parse_steps()\n\n steps.should.equal([\n Ast.Step(\n line=1,\n title=Ast.Text(line=1, text='Given the following email template'),\n text=Ast.Text(line=2, text='''Here we go with a pretty\n big block of text\n surrounded by triple quoted strings\n ''')),\n Ast.Step(\n line=6,\n title=Ast.Text(line=6, text='And a cat picture'),\n text=Ast.Text(line=7, text=\"Now notice we didn't use (:) above\\n \"))])\n\n\ndef test_parse_text():\n parser = Parser([\n (1, gherkin.TOKEN_TAG, 'tag1'),\n (1, gherkin.TOKEN_TAG, 'tag2'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_TAG, 'tag3'),\n (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_LABEL, 'Feature'),\n ])\n\n tags = parser.parse_tags()\n\n tags.should.equal(['tag1', 'tag2', 'tag3'])\n\n\ndef test_parse_tags_on_scenario_outline_examples():\n \"Parser should allow tags to be defined in examples\"\n\n # Given a parser loaded with a document that contains tags on\n # scenario outline examples\n # @tagged-feature\n # Feature: Parse tags\n # @tag1 @tag2\n # Scenario Outline: Test\n # @example-tag1\n # @example-tag2\n # Examples:\n # | Header |\n\n parser = Parser([\n (1, gherkin.TOKEN_TAG, 'tagged-feature'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_LABEL, 'Feature'),\n (2, gherkin.TOKEN_TEXT, 'Parse tags'),\n (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_TAG, 'tag1'),\n (3, gherkin.TOKEN_TAG, 'tag2'),\n (3, gherkin.TOKEN_NEWLINE, '\\n'),\n (4, gherkin.TOKEN_LABEL, 'Scenario Outline'),\n (4, gherkin.TOKEN_TEXT, 'Test'),\n (4, gherkin.TOKEN_NEWLINE, '\\n'),\n (5, gherkin.TOKEN_TAG, 'example-tag1'),\n (5, gherkin.TOKEN_NEWLINE, '\\n'),\n (6, gherkin.TOKEN_TAG, 'example-tag2'),\n (6, 
gherkin.TOKEN_NEWLINE, '\\n'),\n (7, gherkin.TOKEN_LABEL, 'Examples'),\n (7, gherkin.TOKEN_NEWLINE, '\\n'),\n (8, gherkin.TOKEN_TABLE_COLUMN, 'Header'),\n (8, gherkin.TOKEN_NEWLINE, '\\n'),\n (9, gherkin.TOKEN_EOF, ''),\n ])\n\n # When I parse the document\n feature = parser.parse_feature()\n\n # Then I see all the tags were found\n feature.should.equal(Ast.Feature(\n line=2,\n title=Ast.Text(line=2, text='Parse tags'),\n tags=['tagged-feature'],\n scenarios=[Ast.ScenarioOutline(\n line=4,\n title=Ast.Text(line=4, text='Test'),\n tags=['tag1', 'tag2'],\n examples=Ast.Examples(\n line=7,\n tags=['example-tag1', 'example-tag2'],\n table=Ast.Table(line=8, fields=[['Header']])),\n )]))\n\n\ndef test_parse_tags_on_feature_and_scenario():\n\n # Given a parser loaded with a gherkin document with one tag on\n # the feature and two tags on a scenario:\n #\n # @tagged-feature\n # Feature: Parse tags\n #\n # @tag1 @tag2\n # Scenario: Test\n parser = Parser([\n (1, gherkin.TOKEN_TAG, 'tagged-feature'),\n (1, gherkin.TOKEN_NEWLINE, '\\n'),\n (2, gherkin.TOKEN_LABEL, 'Feature'),\n (2, gherkin.TOKEN_TEXT, 'Parse tags'),\n (2, gherkin.TOKEN_NEWLINE, '\\n'),\n (3, gherkin.TOKEN_NEWLINE, '\\n'),\n (4, gherkin.TOKEN_TAG, 'tag1'),\n (4, gherkin.TOKEN_TAG, 'tag2'),\n (4, gherkin.TOKEN_NEWLINE, '\\n'),\n (5, gherkin.TOKEN_LABEL, 'Scenario'),\n (5, gherkin.TOKEN_TEXT, 'Test'),\n (6, gherkin.TOKEN_NEWLINE, '\\n'),\n (7, gherkin.TOKEN_EOF, ''),\n ])\n\n feature = parser.parse_feature()\n\n feature.should.equal(Ast.Feature(\n line=2,\n title=Ast.Text(line=2, text='Parse tags'),\n tags=['tagged-feature'],\n scenarios=[Ast.Scenario(\n line=5,\n title=Ast.Text(line=5, text='Test'),\n tags=['tag1', 'tag2'])]))\n\n\ndef test_ast_node_equal():\n\n # Given two different AST nodes\n n1 = Ast.Node()\n n2 = Ast.Node()\n\n # And different attributes to each node\n n1.name = 'Lincoln'\n n2.color = 'green'\n\n # When I compare them\n equal = n1 == n2\n\n # Then I see they're different\n equal.should.be.false\n",
"step-ids": [
23,
35,
36,
40,
42
]
}
|
[
23,
35,
36,
40,
42
] |
from collections import OrderedDict
import torch
from torch import nn, Tensor
import warnings
from typing import Tuple, List, Dict, Optional, Union
class GeneralizedRCNN(nn.Module):
def __init__(self, backbone, rpn, roi_heads, transform):
super(GeneralizedRCNN, self).__init__()
self.transform = transform
self.backbone = backbone
self.rpn = rpn
self.roi_heads = roi_heads
def forward(self, images, targets=None):
if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
if self.training:
assert targets is not None
# GT box shape,dtype check
for target in targets:
boxes = target["boxes"]
if isinstance(boxes, torch.Tensor):
if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
raise ValueError(f"Expected target bxes to be a tensor"
f"of shape [N, 4], got {boxes.shape}.")
else:
raise ValueError(f"Expected target boxes to be of type"
f"Tensor, got {type(boxes)}.")
# add original image sizes
original_image_sizes : List[Tuple[int, int]] = []
for img in images:
val = img.shape[-2:] # (height, width)
assert len(val) == 2
original_image_sizes.append((val[0], val[1]))
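        # run the joint image/target transform; in a torchvision-style API this is
        # assumed to resize/normalize the images and batch them (assumption: exact
        # behavior depends on the transform object passed in)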
images, targets = self.transform(images, targets)
# Check for degenerate boxes
if targets is not None:
for target_idx, target in enumerate(targets):
boxes = target["boxes"]
                # degenerate boxes are boxes with (x2, y2) values smaller than (x1, y1)
degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
if degenerate_boxes.any():
# print the first degenerate box
bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
degen_bb: List[float] = boxes[bb_idx].tolist()
raise ValueError(f"All bounding boxes should have positive height and width."
f" Found invalid box {degen_bb} for target at index {target_idx}")
features = self.backbone(images.tensors)
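        # a plain backbone may return a single Tensor rather than a dict of feature
        # maps; wrap it so the RPN and ROI heads can treat both cases uniformly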
if isinstance(features, torch.Tensor):
            features = OrderedDict([('0', features)])
proposals, proposal_losses = self.rpn(images, features, targets)
detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets)
detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses, detections
|
normal
|
{
"blob_id": "83ecb6b6237d7ee61f762b191ebc891521067a41",
"index": 9206,
"step-1": "<mask token>\n\n\nclass GeneralizedRCNN(nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass GeneralizedRCNN(nn.Module):\n\n def __init__(self, backbone, rpn, roi_heads, transform):\n super(GeneralizedRCNN, self).__init__()\n self.transform = transform\n self.backbone = backbone\n self.rpn = rpn\n self.roi_heads = roi_heads\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass GeneralizedRCNN(nn.Module):\n\n def __init__(self, backbone, rpn, roi_heads, transform):\n super(GeneralizedRCNN, self).__init__()\n self.transform = transform\n self.backbone = backbone\n self.rpn = rpn\n self.roi_heads = roi_heads\n\n def forward(self, images, targets=None):\n if self.training and targets is None:\n raise ValueError('In training model, targets should be passed')\n if self.training:\n assert targets is not None\n for target in targets:\n boxes = target['boxes']\n if isinstance(boxes, torch.Tensor):\n if len(boxes.shape) != 2 or boxes.shape[-1] != 4:\n raise ValueError(\n f'Expected target bxes to be a tensorof shape [N, 4], got {boxes.shape}.'\n )\n else:\n raise ValueError(\n f'Expected target boxes to be of typeTensor, got {type(boxes)}.'\n )\n original_image_sizes: List[Tuple[int, int]] = []\n for img in images:\n val = img.shape[-2:]\n assert len(val) == 2\n original_image_sizes.append((val[0], val[1]))\n images, targets = self.transform(images, targets)\n if targets is not None:\n for target_idx, target in enumerate(targets):\n boxes = target['boxes']\n degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]\n if degenerate_boxes.any():\n bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]\n degen_bb: List[float] = boxes[bb_idx].tolist()\n raise ValueError(\n f'All bounding boxes should have positive height and width. Found invalid box {degen_bb} for target at index {target_idx}'\n )\n features = self.backbone(images.tensors)\n if isinstance(features, torch.Tensor):\n features = OrderedDict(['0', features])\n proposals, proposal_losses = self.rpn(images, features, targets)\n detections, detector_losses = self.roi_heads(features, proposals,\n images.image_sizes, targets)\n detections = self.transform.postprocess(detections, images.\n image_sizes, original_image_sizes)\n losses = {}\n losses.update(detector_losses)\n losses.update(proposal_losses)\n return losses, detections\n",
"step-4": "from collections import OrderedDict\nimport torch\nfrom torch import nn, Tensor\nimport warnings\nfrom typing import Tuple, List, Dict, Optional, Union\n\n\nclass GeneralizedRCNN(nn.Module):\n\n def __init__(self, backbone, rpn, roi_heads, transform):\n super(GeneralizedRCNN, self).__init__()\n self.transform = transform\n self.backbone = backbone\n self.rpn = rpn\n self.roi_heads = roi_heads\n\n def forward(self, images, targets=None):\n if self.training and targets is None:\n raise ValueError('In training model, targets should be passed')\n if self.training:\n assert targets is not None\n for target in targets:\n boxes = target['boxes']\n if isinstance(boxes, torch.Tensor):\n if len(boxes.shape) != 2 or boxes.shape[-1] != 4:\n raise ValueError(\n f'Expected target bxes to be a tensorof shape [N, 4], got {boxes.shape}.'\n )\n else:\n raise ValueError(\n f'Expected target boxes to be of typeTensor, got {type(boxes)}.'\n )\n original_image_sizes: List[Tuple[int, int]] = []\n for img in images:\n val = img.shape[-2:]\n assert len(val) == 2\n original_image_sizes.append((val[0], val[1]))\n images, targets = self.transform(images, targets)\n if targets is not None:\n for target_idx, target in enumerate(targets):\n boxes = target['boxes']\n degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]\n if degenerate_boxes.any():\n bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]\n degen_bb: List[float] = boxes[bb_idx].tolist()\n raise ValueError(\n f'All bounding boxes should have positive height and width. Found invalid box {degen_bb} for target at index {target_idx}'\n )\n features = self.backbone(images.tensors)\n if isinstance(features, torch.Tensor):\n features = OrderedDict(['0', features])\n proposals, proposal_losses = self.rpn(images, features, targets)\n detections, detector_losses = self.roi_heads(features, proposals,\n images.image_sizes, targets)\n detections = self.transform.postprocess(detections, images.\n image_sizes, original_image_sizes)\n losses = {}\n losses.update(detector_losses)\n losses.update(proposal_losses)\n return losses, detections\n",
"step-5": "from collections import OrderedDict\nimport torch\nfrom torch import nn, Tensor\nimport warnings\nfrom typing import Tuple, List, Dict, Optional, Union\n\n\nclass GeneralizedRCNN(nn.Module):\n\n def __init__(self, backbone, rpn, roi_heads, transform):\n super(GeneralizedRCNN, self).__init__()\n self.transform = transform\n self.backbone = backbone \n self.rpn = rpn\n self.roi_heads = roi_heads \n\n def forward(self, images, targets=None):\n\n if self.training and targets is None:\n raise ValueError(\"In training model, targets should be passed\")\n if self.training:\n assert targets is not None\n\n # GT box shape,dtype check \n for target in targets:\n boxes = target[\"boxes\"]\n if isinstance(boxes, torch.Tensor):\n if len(boxes.shape) != 2 or boxes.shape[-1] != 4:\n raise ValueError(f\"Expected target bxes to be a tensor\"\n f\"of shape [N, 4], got {boxes.shape}.\")\n\n else:\n raise ValueError(f\"Expected target boxes to be of type\" \n f\"Tensor, got {type(boxes)}.\")\n\n # add original image sizes\n original_image_sizes : List[Tuple[int, int]] = []\n for img in images:\n val = img.shape[-2:] # (height, width)\n assert len(val) == 2\n original_image_sizes.append((val[0], val[1]))\n\n images, targets = self.transform(images, targets)\n\n # Check for degenerate boxes\n if targets is not None:\n for target_idx, target in enumerate(targets):\n boxes = target[\"boxes\"]\n\n # degenerate boxes are boxes with x2y2 valeus smaller than x1y1\n degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]\n if degenerate_boxes.any():\n # print the first degenerate box\n bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]\n degen_bb: List[float] = boxes[bb_idx].tolist()\n raise ValueError(f\"All bounding boxes should have positive height and width.\"\n f\" Found invalid box {degen_bb} for target at index {target_idx}\")\n\n features = self.backbone(images.tensors)\n if isinstance(features, torch.Tensor):\n features = OrderedDict(['0', features])\n proposals, proposal_losses = self.rpn(images, features, targets)\n detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets)\n detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)\n\n losses = {}\n losses.update(detector_losses)\n losses.update(proposal_losses)\n\n return losses, detections \n\n ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def minOps(n):
if n <= 1:
return 0
res = 0
if n % 2 == 1:
for i in range(1, n // 2 + 1):
res += i * 2
return res
else:
for j in range(1, n // 2 + 1):
res += j * 2 - 1
return res
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def minOps(n):
if n <= 1:
return 0
res = 0
if n % 2 == 1:
for i in range(1, n // 2 + 1):
res += i * 2
return res
else:
for j in range(1, n // 2 + 1):
res += j * 2 - 1
return res
def summationMin(n):
return n * n // 4
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def minOps(n):
if n <= 1:
return 0
res = 0
if n % 2 == 1:
for i in range(1, n // 2 + 1):
res += i * 2
return res
else:
for j in range(1, n // 2 + 1):
res += j * 2 - 1
return res
def summationMin(n):
return n * n // 4
for i in range(10):
print(summationMin(i))
<|reserved_special_token_1|>
# You have an array arr of length n where arr[i] = (2 * i) + 1 for all valid values of i (i.e. 0 <= i < n).
# In one operation, you can select two indices x and y where 0 <= x, y < n and subtract 1 from arr[x] and add 1 to arr[y]
# (i.e. perform arr[x] -=1 and arr[y] += 1). The goal is to make all the elements of the array equal.
# It is guaranteed that all the elements of the array can be made equal using some operations.
# Given an integer n, the length of the array. Return the minimum number of operations needed to make all the elements of arr equal.
def minOps(n):
if n <= 1:
return 0
res = 0
if n%2 == 1:
for i in range(1, n//2 + 1):
res += i*2
return res
else:
for j in range(1, n//2 + 1):
res += j * 2 - 1
return res
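# closed-form equivalent of minOps: both loop sums above reduce to floor(n*n/4)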
def summationMin(n):
return n*n//4
for i in range(10):
print(summationMin(i))
|
flexible
|
{
"blob_id": "d67842c05af9241dbe7e038a9b2dc4223ee7ef4d",
"index": 8055,
"step-1": "<mask token>\n",
"step-2": "def minOps(n):\n if n <= 1:\n return 0\n res = 0\n if n % 2 == 1:\n for i in range(1, n // 2 + 1):\n res += i * 2\n return res\n else:\n for j in range(1, n // 2 + 1):\n res += j * 2 - 1\n return res\n\n\n<mask token>\n",
"step-3": "def minOps(n):\n if n <= 1:\n return 0\n res = 0\n if n % 2 == 1:\n for i in range(1, n // 2 + 1):\n res += i * 2\n return res\n else:\n for j in range(1, n // 2 + 1):\n res += j * 2 - 1\n return res\n\n\ndef summationMin(n):\n return n * n // 4\n\n\n<mask token>\n",
"step-4": "def minOps(n):\n if n <= 1:\n return 0\n res = 0\n if n % 2 == 1:\n for i in range(1, n // 2 + 1):\n res += i * 2\n return res\n else:\n for j in range(1, n // 2 + 1):\n res += j * 2 - 1\n return res\n\n\ndef summationMin(n):\n return n * n // 4\n\n\nfor i in range(10):\n print(summationMin(i))\n",
"step-5": "# You have an array arr of length n where arr[i] = (2 * i) + 1 for all valid values of i (i.e. 0 <= i < n).\n\n# In one operation, you can select two indices x and y where 0 <= x, y < n and subtract 1 from arr[x] and add 1 to arr[y] \n# (i.e. perform arr[x] -=1 and arr[y] += 1). The goal is to make all the elements of the array equal. \n# It is guaranteed that all the elements of the array can be made equal using some operations.\n\n# Given an integer n, the length of the array. Return the minimum number of operations needed to make all the elements of arr equal.\n\ndef minOps(n):\n if n <= 1:\n return 0\n res = 0\n if n%2 == 1:\n for i in range(1, n//2 + 1):\n res += i*2\n return res\n else:\n for j in range(1, n//2 + 1):\n res += j * 2 - 1\n return res\n\ndef summationMin(n):\n return n*n//4\n\n\nfor i in range(10):\n print(summationMin(i))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def calc_rec_area():
length = eval(input('Enter the length: '))
width = eval(input('Enter the width: '))
area = length * width
print('Area =', area)
def calc_rec_vol():
lengthh = eval(input('Enter the length: '))
widthh = eval(input('Enter the width: '))
heighth = eval(input('Enter the height: '))
volume = lengthh * widthh * heighth
print('Volume =', volume)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calc_rec_area():
length = eval(input('Enter the length: '))
width = eval(input('Enter the width: '))
area = length * width
print('Area =', area)
def calc_rec_vol():
lengthh = eval(input('Enter the length: '))
widthh = eval(input('Enter the width: '))
heighth = eval(input('Enter the height: '))
volume = lengthh * widthh * heighth
print('Volume =', volume)
def shot_percentage():
shotm = eval(input('enter the shots made: '))
shott = eval(input('enter the total shots: '))
shotper = shotm / shott
print('Shot percentage = ', shotper)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calc_rec_area():
length = eval(input('Enter the length: '))
width = eval(input('Enter the width: '))
area = length * width
print('Area =', area)
def calc_rec_vol():
lengthh = eval(input('Enter the length: '))
widthh = eval(input('Enter the width: '))
heighth = eval(input('Enter the height: '))
volume = lengthh * widthh * heighth
print('Volume =', volume)
def shot_percentage():
shotm = eval(input('enter the shots made: '))
shott = eval(input('enter the total shots: '))
shotper = shotm / shott
print('Shot percentage = ', shotper)
def coffee():
pound = eval(input('enter the amount of pounds purchased: '))
cost = pound * 10.5 + pound * 0.86 + 1.5
print('The total cost of coffee are', cost)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calc_rec_area():
length = eval(input('Enter the length: '))
width = eval(input('Enter the width: '))
area = length * width
print('Area =', area)
def calc_rec_vol():
lengthh = eval(input('Enter the length: '))
widthh = eval(input('Enter the width: '))
heighth = eval(input('Enter the height: '))
volume = lengthh * widthh * heighth
print('Volume =', volume)
def shot_percentage():
shotm = eval(input('enter the shots made: '))
shott = eval(input('enter the total shots: '))
shotper = shotm / shott
print('Shot percentage = ', shotper)
def coffee():
pound = eval(input('enter the amount of pounds purchased: '))
cost = pound * 10.5 + pound * 0.86 + 1.5
print('The total cost of coffee are', cost)
def kilometers_to_miles():
"""1 mile = 1.61 kilometers"""
miles = eval(input('enter the amount of miles driven: '))
driven = miles * 1.61
print('The amount of kilometers driven are: ', driven)
<|reserved_special_token_1|>
"""
Name: Thomas Scola
lab1.py
Problem: This function calculates the area of a rectangle
"""
'''def calc_area():'''
def calc_rec_area():
length = eval(input("Enter the length: "))
width = eval(input("Enter the width: "))
area = length * width
print("Area =", area)
def calc_rec_vol():
lengthh = eval(input("Enter the length: "))
widthh = eval(input("Enter the width: "))
heighth = eval(input("Enter the height: "))
volume = lengthh * widthh * heighth
print("Volume =", volume)
def shot_percentage():
shotm = eval(input("enter the shots made: "))
shott = eval(input("enter the total shots: "))
shotper = shotm / shott
print("Shot percentage = ", shotper)
def coffee():
pound = eval(input("enter the amount of pounds purchased: "))
cost = (pound * 10.50) + (pound * 0.86) + 1.50
print("The total cost of coffee are", cost)
def kilometers_to_miles():
"""1 mile = 1.61 kilometers"""
miles = eval(input("enter the amount of miles driven: "))
driven = miles * 1.61
print("The amount of kilometers driven are: ", driven)
|
flexible
|
{
"blob_id": "076e10b3741542b7137f6ac517dba482f545b123",
"index": 2154,
"step-1": "<mask token>\n\n\ndef calc_rec_area():\n length = eval(input('Enter the length: '))\n width = eval(input('Enter the width: '))\n area = length * width\n print('Area =', area)\n\n\ndef calc_rec_vol():\n lengthh = eval(input('Enter the length: '))\n widthh = eval(input('Enter the width: '))\n heighth = eval(input('Enter the height: '))\n volume = lengthh * widthh * heighth\n print('Volume =', volume)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef calc_rec_area():\n length = eval(input('Enter the length: '))\n width = eval(input('Enter the width: '))\n area = length * width\n print('Area =', area)\n\n\ndef calc_rec_vol():\n lengthh = eval(input('Enter the length: '))\n widthh = eval(input('Enter the width: '))\n heighth = eval(input('Enter the height: '))\n volume = lengthh * widthh * heighth\n print('Volume =', volume)\n\n\ndef shot_percentage():\n shotm = eval(input('enter the shots made: '))\n shott = eval(input('enter the total shots: '))\n shotper = shotm / shott\n print('Shot percentage = ', shotper)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef calc_rec_area():\n length = eval(input('Enter the length: '))\n width = eval(input('Enter the width: '))\n area = length * width\n print('Area =', area)\n\n\ndef calc_rec_vol():\n lengthh = eval(input('Enter the length: '))\n widthh = eval(input('Enter the width: '))\n heighth = eval(input('Enter the height: '))\n volume = lengthh * widthh * heighth\n print('Volume =', volume)\n\n\ndef shot_percentage():\n shotm = eval(input('enter the shots made: '))\n shott = eval(input('enter the total shots: '))\n shotper = shotm / shott\n print('Shot percentage = ', shotper)\n\n\ndef coffee():\n pound = eval(input('enter the amount of pounds purchased: '))\n cost = pound * 10.5 + pound * 0.86 + 1.5\n print('The total cost of coffee are', cost)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef calc_rec_area():\n length = eval(input('Enter the length: '))\n width = eval(input('Enter the width: '))\n area = length * width\n print('Area =', area)\n\n\ndef calc_rec_vol():\n lengthh = eval(input('Enter the length: '))\n widthh = eval(input('Enter the width: '))\n heighth = eval(input('Enter the height: '))\n volume = lengthh * widthh * heighth\n print('Volume =', volume)\n\n\ndef shot_percentage():\n shotm = eval(input('enter the shots made: '))\n shott = eval(input('enter the total shots: '))\n shotper = shotm / shott\n print('Shot percentage = ', shotper)\n\n\ndef coffee():\n pound = eval(input('enter the amount of pounds purchased: '))\n cost = pound * 10.5 + pound * 0.86 + 1.5\n print('The total cost of coffee are', cost)\n\n\ndef kilometers_to_miles():\n \"\"\"1 mile = 1.61 kilometers\"\"\"\n miles = eval(input('enter the amount of miles driven: '))\n driven = miles * 1.61\n print('The amount of kilometers driven are: ', driven)\n",
"step-5": "\"\"\"\nName: Thomas Scola\nlab1.py\n\nProblem: This function calculates the area of a rectangle\n\"\"\"\n\n\n'''def calc_area():'''\ndef calc_rec_area():\n length = eval(input(\"Enter the length: \"))\n width = eval(input(\"Enter the width: \"))\n area = length * width\n print(\"Area =\", area)\n\ndef calc_rec_vol():\n lengthh = eval(input(\"Enter the length: \"))\n widthh = eval(input(\"Enter the width: \"))\n heighth = eval(input(\"Enter the height: \"))\n volume = lengthh * widthh * heighth\n print(\"Volume =\", volume)\n\ndef shot_percentage():\n shotm = eval(input(\"enter the shots made: \"))\n shott = eval(input(\"enter the total shots: \"))\n shotper = shotm / shott\n print(\"Shot percentage = \", shotper)\n\ndef coffee():\n pound = eval(input(\"enter the amount of pounds purchased: \"))\n cost = (pound * 10.50) + (pound * 0.86) + 1.50\n print(\"The total cost of coffee are\", cost)\n\ndef kilometers_to_miles():\n \"\"\"1 mile = 1.61 kilometers\"\"\"\n miles = eval(input(\"enter the amount of miles driven: \"))\n driven = miles * 1.61\n print(\"The amount of kilometers driven are: \", driven)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class SnakeGame(object):
<|reserved_special_token_0|>
def reset(self):
return SnakeGame._get_image(self.surface)
def step(self, key):
length = self.snake.length
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
self.done = True
act = [UP, DOWN, LEFT, RIGHT]
self.snake.point(act[key])
self.surface.fill((255, 255, 255))
try:
self.snake.move()
except SnakeException:
self.done = True
if self.done:
state = SnakeGame._get_image(self.surface)
return state, length, self.done, {}
check_eat(self.snake, self.apple)
self.snake.draw(self.surface)
self.apple.draw(self.surface)
font = pygame.font.Font(None, 36)
text = font.render(str(self.snake.length), 1, (10, 10, 10))
text_pos = text.get_rect()
text_pos.centerx = 20
self.surface.blit(text, text_pos)
self.screen.blit(self.surface, (0, 0))
state = SnakeGame._get_image(self.surface)
pygame.display.flip()
pygame.display.update()
self.fpsClock.tick(self.fps + self.snake.length / 3)
return state, self.snake.length, False, {}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SnakeGame(object):
<|reserved_special_token_0|>
def reset(self):
return SnakeGame._get_image(self.surface)
def step(self, key):
length = self.snake.length
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
self.done = True
act = [UP, DOWN, LEFT, RIGHT]
self.snake.point(act[key])
self.surface.fill((255, 255, 255))
try:
self.snake.move()
except SnakeException:
self.done = True
if self.done:
state = SnakeGame._get_image(self.surface)
return state, length, self.done, {}
check_eat(self.snake, self.apple)
self.snake.draw(self.surface)
self.apple.draw(self.surface)
font = pygame.font.Font(None, 36)
text = font.render(str(self.snake.length), 1, (10, 10, 10))
text_pos = text.get_rect()
text_pos.centerx = 20
self.surface.blit(text, text_pos)
self.screen.blit(self.surface, (0, 0))
state = SnakeGame._get_image(self.surface)
pygame.display.flip()
pygame.display.update()
self.fpsClock.tick(self.fps + self.snake.length / 3)
return state, self.snake.length, False, {}
@staticmethod
def _get_image(surface):
ret = list(map(lambda x: list(x), np.zeros((SCREEN_HEIGHT,
SCREEN_WIDTH))))
for j in range(SCREEN_HEIGHT):
for k in range(SCREEN_WIDTH):
ret[j][k] = surface.get_at((k, j))
return np.array(ret)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SnakeGame(object):
def __init__(self):
self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),
0, 32)
self.surface = pygame.Surface(self.screen.get_size())
self.surface = self.surface.convert()
self.surface.fill((255, 255, 255))
self.clock = pygame.time.Clock()
self.fps = 60
self.done = False
pygame.key.set_repeat(1, 40)
self.screen.blit(self.surface, (0, 0))
pygame.init()
self.fpsClock = pygame.time.Clock()
self.snake = Snake()
self.apple = Apple()
def reset(self):
return SnakeGame._get_image(self.surface)
def step(self, key):
length = self.snake.length
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
self.done = True
act = [UP, DOWN, LEFT, RIGHT]
self.snake.point(act[key])
self.surface.fill((255, 255, 255))
try:
self.snake.move()
except SnakeException:
self.done = True
if self.done:
state = SnakeGame._get_image(self.surface)
return state, length, self.done, {}
check_eat(self.snake, self.apple)
self.snake.draw(self.surface)
self.apple.draw(self.surface)
font = pygame.font.Font(None, 36)
text = font.render(str(self.snake.length), 1, (10, 10, 10))
text_pos = text.get_rect()
text_pos.centerx = 20
self.surface.blit(text, text_pos)
self.screen.blit(self.surface, (0, 0))
state = SnakeGame._get_image(self.surface)
pygame.display.flip()
pygame.display.update()
self.fpsClock.tick(self.fps + self.snake.length / 3)
return state, self.snake.length, False, {}
@staticmethod
def _get_image(surface):
ret = list(map(lambda x: list(x), np.zeros((SCREEN_HEIGHT,
SCREEN_WIDTH))))
for j in range(SCREEN_HEIGHT):
for k in range(SCREEN_WIDTH):
ret[j][k] = surface.get_at((k, j))
return np.array(ret)
<|reserved_special_token_1|>
import pygame
import sys
import time
import random
from snake_gym.envs.modules import *
from pygame.locals import *
import numpy as np
class SnakeGame(object):
def __init__(self):
self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),
0, 32)
self.surface = pygame.Surface(self.screen.get_size())
self.surface = self.surface.convert()
self.surface.fill((255, 255, 255))
self.clock = pygame.time.Clock()
self.fps = 60
self.done = False
pygame.key.set_repeat(1, 40)
self.screen.blit(self.surface, (0, 0))
pygame.init()
self.fpsClock = pygame.time.Clock()
self.snake = Snake()
self.apple = Apple()
def reset(self):
return SnakeGame._get_image(self.surface)
def step(self, key):
length = self.snake.length
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
self.done = True
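        # map the discrete action index (0-3) onto a movement direction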
act = [UP, DOWN, LEFT, RIGHT]
self.snake.point(act[key])
self.surface.fill((255, 255, 255))
try:
self.snake.move()
except SnakeException:
self.done = True
if self.done:
state = SnakeGame._get_image(self.surface)
return state, length, self.done, {}
check_eat(self.snake, self.apple)
self.snake.draw(self.surface)
self.apple.draw(self.surface)
font = pygame.font.Font(None, 36)
text = font.render(str(self.snake.length), 1, (10, 10, 10))
text_pos = text.get_rect()
text_pos.centerx = 20
self.surface.blit(text, text_pos)
self.screen.blit(self.surface, (0, 0))
state = SnakeGame._get_image(self.surface)
pygame.display.flip()
pygame.display.update()
self.fpsClock.tick(self.fps + self.snake.length / 3)
return state, self.snake.length, False, {}
@staticmethod
def _get_image(surface):
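        # read the rendered surface back pixel by pixel into an
        # (SCREEN_HEIGHT, SCREEN_WIDTH) array of RGBA values; simple but slow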
ret = list(map(lambda x: list(x), np.zeros((SCREEN_HEIGHT,
SCREEN_WIDTH))))
for j in range(SCREEN_HEIGHT):
for k in range(SCREEN_WIDTH):
ret[j][k] = surface.get_at((k, j))
return np.array(ret)
|
flexible
|
{
"blob_id": "6d61df9ac072100d01a1ce3cf7b4c056f66a163c",
"index": 502,
"step-1": "<mask token>\n\n\nclass SnakeGame(object):\n <mask token>\n\n def reset(self):\n return SnakeGame._get_image(self.surface)\n\n def step(self, key):\n length = self.snake.length\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n self.done = True\n act = [UP, DOWN, LEFT, RIGHT]\n self.snake.point(act[key])\n self.surface.fill((255, 255, 255))\n try:\n self.snake.move()\n except SnakeException:\n self.done = True\n if self.done:\n state = SnakeGame._get_image(self.surface)\n return state, length, self.done, {}\n check_eat(self.snake, self.apple)\n self.snake.draw(self.surface)\n self.apple.draw(self.surface)\n font = pygame.font.Font(None, 36)\n text = font.render(str(self.snake.length), 1, (10, 10, 10))\n text_pos = text.get_rect()\n text_pos.centerx = 20\n self.surface.blit(text, text_pos)\n self.screen.blit(self.surface, (0, 0))\n state = SnakeGame._get_image(self.surface)\n pygame.display.flip()\n pygame.display.update()\n self.fpsClock.tick(self.fps + self.snake.length / 3)\n return state, self.snake.length, False, {}\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SnakeGame(object):\n <mask token>\n\n def reset(self):\n return SnakeGame._get_image(self.surface)\n\n def step(self, key):\n length = self.snake.length\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n self.done = True\n act = [UP, DOWN, LEFT, RIGHT]\n self.snake.point(act[key])\n self.surface.fill((255, 255, 255))\n try:\n self.snake.move()\n except SnakeException:\n self.done = True\n if self.done:\n state = SnakeGame._get_image(self.surface)\n return state, length, self.done, {}\n check_eat(self.snake, self.apple)\n self.snake.draw(self.surface)\n self.apple.draw(self.surface)\n font = pygame.font.Font(None, 36)\n text = font.render(str(self.snake.length), 1, (10, 10, 10))\n text_pos = text.get_rect()\n text_pos.centerx = 20\n self.surface.blit(text, text_pos)\n self.screen.blit(self.surface, (0, 0))\n state = SnakeGame._get_image(self.surface)\n pygame.display.flip()\n pygame.display.update()\n self.fpsClock.tick(self.fps + self.snake.length / 3)\n return state, self.snake.length, False, {}\n\n @staticmethod\n def _get_image(surface):\n ret = list(map(lambda x: list(x), np.zeros((SCREEN_HEIGHT,\n SCREEN_WIDTH))))\n for j in range(SCREEN_HEIGHT):\n for k in range(SCREEN_WIDTH):\n ret[j][k] = surface.get_at((k, j))\n return np.array(ret)\n",
"step-3": "<mask token>\n\n\nclass SnakeGame(object):\n\n def __init__(self):\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),\n 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n self.surface.fill((255, 255, 255))\n self.clock = pygame.time.Clock()\n self.fps = 60\n self.done = False\n pygame.key.set_repeat(1, 40)\n self.screen.blit(self.surface, (0, 0))\n pygame.init()\n self.fpsClock = pygame.time.Clock()\n self.snake = Snake()\n self.apple = Apple()\n\n def reset(self):\n return SnakeGame._get_image(self.surface)\n\n def step(self, key):\n length = self.snake.length\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n self.done = True\n act = [UP, DOWN, LEFT, RIGHT]\n self.snake.point(act[key])\n self.surface.fill((255, 255, 255))\n try:\n self.snake.move()\n except SnakeException:\n self.done = True\n if self.done:\n state = SnakeGame._get_image(self.surface)\n return state, length, self.done, {}\n check_eat(self.snake, self.apple)\n self.snake.draw(self.surface)\n self.apple.draw(self.surface)\n font = pygame.font.Font(None, 36)\n text = font.render(str(self.snake.length), 1, (10, 10, 10))\n text_pos = text.get_rect()\n text_pos.centerx = 20\n self.surface.blit(text, text_pos)\n self.screen.blit(self.surface, (0, 0))\n state = SnakeGame._get_image(self.surface)\n pygame.display.flip()\n pygame.display.update()\n self.fpsClock.tick(self.fps + self.snake.length / 3)\n return state, self.snake.length, False, {}\n\n @staticmethod\n def _get_image(surface):\n ret = list(map(lambda x: list(x), np.zeros((SCREEN_HEIGHT,\n SCREEN_WIDTH))))\n for j in range(SCREEN_HEIGHT):\n for k in range(SCREEN_WIDTH):\n ret[j][k] = surface.get_at((k, j))\n return np.array(ret)\n",
"step-4": "import pygame\nimport sys\nimport time\nimport random\nfrom snake_gym.envs.modules import *\nfrom pygame.locals import *\nimport numpy as np\n\n\nclass SnakeGame(object):\n\n def __init__(self):\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),\n 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n self.surface.fill((255, 255, 255))\n self.clock = pygame.time.Clock()\n self.fps = 60\n self.done = False\n pygame.key.set_repeat(1, 40)\n self.screen.blit(self.surface, (0, 0))\n pygame.init()\n self.fpsClock = pygame.time.Clock()\n self.snake = Snake()\n self.apple = Apple()\n\n def reset(self):\n return SnakeGame._get_image(self.surface)\n\n def step(self, key):\n length = self.snake.length\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n self.done = True\n act = [UP, DOWN, LEFT, RIGHT]\n self.snake.point(act[key])\n self.surface.fill((255, 255, 255))\n try:\n self.snake.move()\n except SnakeException:\n self.done = True\n if self.done:\n state = SnakeGame._get_image(self.surface)\n return state, length, self.done, {}\n check_eat(self.snake, self.apple)\n self.snake.draw(self.surface)\n self.apple.draw(self.surface)\n font = pygame.font.Font(None, 36)\n text = font.render(str(self.snake.length), 1, (10, 10, 10))\n text_pos = text.get_rect()\n text_pos.centerx = 20\n self.surface.blit(text, text_pos)\n self.screen.blit(self.surface, (0, 0))\n state = SnakeGame._get_image(self.surface)\n pygame.display.flip()\n pygame.display.update()\n self.fpsClock.tick(self.fps + self.snake.length / 3)\n return state, self.snake.length, False, {}\n\n @staticmethod\n def _get_image(surface):\n ret = list(map(lambda x: list(x), np.zeros((SCREEN_HEIGHT,\n SCREEN_WIDTH))))\n for j in range(SCREEN_HEIGHT):\n for k in range(SCREEN_WIDTH):\n ret[j][k] = surface.get_at((k, j))\n return np.array(ret)\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
#!/usr/bin/env python
import sys
import errno
# read first line from stdin and discard it
first_line = sys.stdin.readline()
# print all other lines
for line in sys.stdin:
try:
print line,
except IOError, e:
if e.errno == errno.EPIPE:
exit(0)
|
normal
|
{
"blob_id": "bd06b04666ade1e7591b02f8211bc9b62fd08936",
"index": 791,
"step-1": "#!/usr/bin/env python\nimport sys\nimport errno\n\n# read first line from stdin and discard it\nfirst_line = sys.stdin.readline()\n\n# print all other lines\nfor line in sys.stdin:\n try:\n print line,\n except IOError, e:\n if e.errno == errno.EPIPE:\n exit(0)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('foodBookApp', '0027_remove_post_total_comments')]
operations = [migrations.AlterField(model_name='post', name='likes',
field=models.ManyToManyField(blank=True, related_name='like_post',
to=settings.AUTH_USER_MODEL)), migrations.AlterField(model_name=
'post', name='privacy', field=models.CharField(choices=[('public',
'Public'), ('private', 'Private'), ('friends', 'Friends Only')],
default='public', max_length=7)), migrations.AlterField(model_name=
'profile', name='privacy', field=models.CharField(choices=[(
'public', 'Public'), ('private', 'Private'), ('friends',
'Friends Only')], default='public', max_length=7))]
<|reserved_special_token_1|>
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('foodBookApp', '0027_remove_post_total_comments')]
operations = [migrations.AlterField(model_name='post', name='likes',
field=models.ManyToManyField(blank=True, related_name='like_post',
to=settings.AUTH_USER_MODEL)), migrations.AlterField(model_name=
'post', name='privacy', field=models.CharField(choices=[('public',
'Public'), ('private', 'Private'), ('friends', 'Friends Only')],
default='public', max_length=7)), migrations.AlterField(model_name=
'profile', name='privacy', field=models.CharField(choices=[(
'public', 'Public'), ('private', 'Private'), ('friends',
'Friends Only')], default='public', max_length=7))]
<|reserved_special_token_1|>
# Generated by Django 3.1.3 on 2020-11-27 02:17
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('foodBookApp', '0027_remove_post_total_comments'),
]
operations = [
migrations.AlterField(
model_name='post',
name='likes',
field=models.ManyToManyField(blank=True, related_name='like_post', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='post',
name='privacy',
field=models.CharField(choices=[('public', 'Public'), ('private', 'Private'), ('friends', 'Friends Only')], default='public', max_length=7),
),
migrations.AlterField(
model_name='profile',
name='privacy',
field=models.CharField(choices=[('public', 'Public'), ('private', 'Private'), ('friends', 'Friends Only')], default='public', max_length=7),
),
]
|
flexible
|
{
"blob_id": "84d9400dc4ee0bebce3f5f7da0bd77a280bb54a9",
"index": 8503,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('foodBookApp', '0027_remove_post_total_comments')]\n operations = [migrations.AlterField(model_name='post', name='likes',\n field=models.ManyToManyField(blank=True, related_name='like_post',\n to=settings.AUTH_USER_MODEL)), migrations.AlterField(model_name=\n 'post', name='privacy', field=models.CharField(choices=[('public',\n 'Public'), ('private', 'Private'), ('friends', 'Friends Only')],\n default='public', max_length=7)), migrations.AlterField(model_name=\n 'profile', name='privacy', field=models.CharField(choices=[(\n 'public', 'Public'), ('private', 'Private'), ('friends',\n 'Friends Only')], default='public', max_length=7))]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('foodBookApp', '0027_remove_post_total_comments')]\n operations = [migrations.AlterField(model_name='post', name='likes',\n field=models.ManyToManyField(blank=True, related_name='like_post',\n to=settings.AUTH_USER_MODEL)), migrations.AlterField(model_name=\n 'post', name='privacy', field=models.CharField(choices=[('public',\n 'Public'), ('private', 'Private'), ('friends', 'Friends Only')],\n default='public', max_length=7)), migrations.AlterField(model_name=\n 'profile', name='privacy', field=models.CharField(choices=[(\n 'public', 'Public'), ('private', 'Private'), ('friends',\n 'Friends Only')], default='public', max_length=7))]\n",
"step-5": "# Generated by Django 3.1.3 on 2020-11-27 02:17\n\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('foodBookApp', '0027_remove_post_total_comments'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='post',\n name='likes',\n field=models.ManyToManyField(blank=True, related_name='like_post', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AlterField(\n model_name='post',\n name='privacy',\n field=models.CharField(choices=[('public', 'Public'), ('private', 'Private'), ('friends', 'Friends Only')], default='public', max_length=7),\n ),\n migrations.AlterField(\n model_name='profile',\n name='privacy',\n field=models.CharField(choices=[('public', 'Public'), ('private', 'Private'), ('friends', 'Friends Only')], default='public', max_length=7),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import mechanicalsoup
from bs4 import BeautifulSoup
import re
import json
def extract_title(page):
return page.find("header").find("h1").contents[0]
def extract_colours(page):
color_list = page.find("ul")
return list(dict.fromkeys(re.findall("#\w+", str(color_list.contents))))
def get_colours_from_page(browser, baseurl, target_page):
response = browser.open(baseurl + target_page)
soup = BeautifulSoup(response.text, 'lxml')
extract = soup.find("section", {"id": "item"})
entity = {"title": extract_title(extract), "colours": extract_colours(extract)}
return entity
def get_links_from_article(articles):
links = []
for article in articles:
links.append(article.find("a").attrs['href'])
return links
def scrape_flag_pagination_page(browser, baseurl, pageCount):
response = browser.open(baseurl + "/flags?page={0}".format(pageCount))
soup = BeautifulSoup(response.text, 'lxml')
flag_articles = soup.findAll("article")
return get_links_from_article(flag_articles)
baseurl = "https://encycolorpedia.com"
browser = mechanicalsoup.StatefulBrowser(raise_on_404=True)
list_of_urls = []
flag_count = 0
pageCount = 1
while(True):
try:
list_of_urls += scrape_flag_pagination_page(browser, baseurl, pageCount)
except mechanicalsoup.utils.LinkNotFoundError:
break
pageCount += 1
package = []
for url in list_of_urls:
package.append(get_colours_from_page(browser, baseurl, url))
with open('flag_colours.json', 'w', encoding='utf-8') as f:
json.dump(package, f, ensure_ascii=False, indent=4)
|
normal
|
{
"blob_id": "9fd33089a9dc919ef2fb2698059e60a24a0e05e6",
"index": 6118,
"step-1": "<mask token>\n\n\ndef extract_title(page):\n return page.find('header').find('h1').contents[0]\n\n\ndef extract_colours(page):\n color_list = page.find('ul')\n return list(dict.fromkeys(re.findall('#\\\\w+', str(color_list.contents))))\n\n\ndef get_colours_from_page(browser, baseurl, target_page):\n response = browser.open(baseurl + target_page)\n soup = BeautifulSoup(response.text, 'lxml')\n extract = soup.find('section', {'id': 'item'})\n entity = {'title': extract_title(extract), 'colours': extract_colours(\n extract)}\n return entity\n\n\ndef get_links_from_article(articles):\n links = []\n for article in articles:\n links.append(article.find('a').attrs['href'])\n return links\n\n\ndef scrape_flag_pagination_page(browser, baseurl, pageCount):\n response = browser.open(baseurl + '/flags?page={0}'.format(pageCount))\n soup = BeautifulSoup(response.text, 'lxml')\n flag_articles = soup.findAll('article')\n return get_links_from_article(flag_articles)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef extract_title(page):\n return page.find('header').find('h1').contents[0]\n\n\ndef extract_colours(page):\n color_list = page.find('ul')\n return list(dict.fromkeys(re.findall('#\\\\w+', str(color_list.contents))))\n\n\ndef get_colours_from_page(browser, baseurl, target_page):\n response = browser.open(baseurl + target_page)\n soup = BeautifulSoup(response.text, 'lxml')\n extract = soup.find('section', {'id': 'item'})\n entity = {'title': extract_title(extract), 'colours': extract_colours(\n extract)}\n return entity\n\n\ndef get_links_from_article(articles):\n links = []\n for article in articles:\n links.append(article.find('a').attrs['href'])\n return links\n\n\ndef scrape_flag_pagination_page(browser, baseurl, pageCount):\n response = browser.open(baseurl + '/flags?page={0}'.format(pageCount))\n soup = BeautifulSoup(response.text, 'lxml')\n flag_articles = soup.findAll('article')\n return get_links_from_article(flag_articles)\n\n\n<mask token>\nwhile True:\n try:\n list_of_urls += scrape_flag_pagination_page(browser, baseurl, pageCount\n )\n except mechanicalsoup.utils.LinkNotFoundError:\n break\n pageCount += 1\n<mask token>\nfor url in list_of_urls:\n package.append(get_colours_from_page(browser, baseurl, url))\nwith open('flag_colours.json', 'w', encoding='utf-8') as f:\n json.dump(package, f, ensure_ascii=False, indent=4)\n",
"step-3": "<mask token>\n\n\ndef extract_title(page):\n return page.find('header').find('h1').contents[0]\n\n\ndef extract_colours(page):\n color_list = page.find('ul')\n return list(dict.fromkeys(re.findall('#\\\\w+', str(color_list.contents))))\n\n\ndef get_colours_from_page(browser, baseurl, target_page):\n response = browser.open(baseurl + target_page)\n soup = BeautifulSoup(response.text, 'lxml')\n extract = soup.find('section', {'id': 'item'})\n entity = {'title': extract_title(extract), 'colours': extract_colours(\n extract)}\n return entity\n\n\ndef get_links_from_article(articles):\n links = []\n for article in articles:\n links.append(article.find('a').attrs['href'])\n return links\n\n\ndef scrape_flag_pagination_page(browser, baseurl, pageCount):\n response = browser.open(baseurl + '/flags?page={0}'.format(pageCount))\n soup = BeautifulSoup(response.text, 'lxml')\n flag_articles = soup.findAll('article')\n return get_links_from_article(flag_articles)\n\n\nbaseurl = 'https://encycolorpedia.com'\nbrowser = mechanicalsoup.StatefulBrowser(raise_on_404=True)\nlist_of_urls = []\nflag_count = 0\npageCount = 1\nwhile True:\n try:\n list_of_urls += scrape_flag_pagination_page(browser, baseurl, pageCount\n )\n except mechanicalsoup.utils.LinkNotFoundError:\n break\n pageCount += 1\npackage = []\nfor url in list_of_urls:\n package.append(get_colours_from_page(browser, baseurl, url))\nwith open('flag_colours.json', 'w', encoding='utf-8') as f:\n json.dump(package, f, ensure_ascii=False, indent=4)\n",
"step-4": "import mechanicalsoup\nfrom bs4 import BeautifulSoup\nimport re\nimport json\n\n\ndef extract_title(page):\n return page.find('header').find('h1').contents[0]\n\n\ndef extract_colours(page):\n color_list = page.find('ul')\n return list(dict.fromkeys(re.findall('#\\\\w+', str(color_list.contents))))\n\n\ndef get_colours_from_page(browser, baseurl, target_page):\n response = browser.open(baseurl + target_page)\n soup = BeautifulSoup(response.text, 'lxml')\n extract = soup.find('section', {'id': 'item'})\n entity = {'title': extract_title(extract), 'colours': extract_colours(\n extract)}\n return entity\n\n\ndef get_links_from_article(articles):\n links = []\n for article in articles:\n links.append(article.find('a').attrs['href'])\n return links\n\n\ndef scrape_flag_pagination_page(browser, baseurl, pageCount):\n response = browser.open(baseurl + '/flags?page={0}'.format(pageCount))\n soup = BeautifulSoup(response.text, 'lxml')\n flag_articles = soup.findAll('article')\n return get_links_from_article(flag_articles)\n\n\nbaseurl = 'https://encycolorpedia.com'\nbrowser = mechanicalsoup.StatefulBrowser(raise_on_404=True)\nlist_of_urls = []\nflag_count = 0\npageCount = 1\nwhile True:\n try:\n list_of_urls += scrape_flag_pagination_page(browser, baseurl, pageCount\n )\n except mechanicalsoup.utils.LinkNotFoundError:\n break\n pageCount += 1\npackage = []\nfor url in list_of_urls:\n package.append(get_colours_from_page(browser, baseurl, url))\nwith open('flag_colours.json', 'w', encoding='utf-8') as f:\n json.dump(package, f, ensure_ascii=False, indent=4)\n",
"step-5": "import mechanicalsoup\nfrom bs4 import BeautifulSoup\nimport re\nimport json\n\n\ndef extract_title(page):\n return page.find(\"header\").find(\"h1\").contents[0]\n\n\n\ndef extract_colours(page):\n color_list = page.find(\"ul\")\n return list(dict.fromkeys(re.findall(\"#\\w+\", str(color_list.contents))))\n\n\ndef get_colours_from_page(browser, baseurl, target_page):\n response = browser.open(baseurl + target_page)\n soup = BeautifulSoup(response.text, 'lxml')\n extract = soup.find(\"section\", {\"id\": \"item\"})\n entity = {\"title\": extract_title(extract), \"colours\": extract_colours(extract)}\n return entity\n\ndef get_links_from_article(articles):\n links = []\n for article in articles:\n links.append(article.find(\"a\").attrs['href'])\n return links\n\n\ndef scrape_flag_pagination_page(browser, baseurl, pageCount):\n response = browser.open(baseurl + \"/flags?page={0}\".format(pageCount))\n soup = BeautifulSoup(response.text, 'lxml')\n flag_articles = soup.findAll(\"article\")\n return get_links_from_article(flag_articles)\n\n\n\nbaseurl = \"https://encycolorpedia.com\"\nbrowser = mechanicalsoup.StatefulBrowser(raise_on_404=True)\nlist_of_urls = []\nflag_count = 0\npageCount = 1\nwhile(True):\n try:\n list_of_urls += scrape_flag_pagination_page(browser, baseurl, pageCount)\n except mechanicalsoup.utils.LinkNotFoundError:\n break\n pageCount += 1\npackage = []\nfor url in list_of_urls:\n package.append(get_colours_from_page(browser, baseurl, url))\n\nwith open('flag_colours.json', 'w', encoding='utf-8') as f:\n json.dump(package, f, ensure_ascii=False, indent=4)",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
configuration = Configuration()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from __future__ import absolute_import
from .models.basic_channel_info import BasicChannelInfo
from .models.basic_follower_info import BasicFollowerInfo
from .models.basic_following_info import BasicFollowingInfo
from .models.categories import Categories
from .models.category import Category
from .models.channel_details import ChannelDetails
from .models.channel_search_results import ChannelSearchResults
from .models.channel_video import ChannelVideo
from .models.channel_videos import ChannelVideos
from .models.description_panel import DescriptionPanel
from .models.event import Event
from .models.events import Events
from .models.language import Language
from .models.languages import Languages
from .models.mobile_notify_settings import MobileNotifySettings
from .models.multi_participant import MultiParticipant
from .models.notification import Notification
from .models.notification_1 import Notification1
from .models.notifications import Notifications
from .models.online_channels import OnlineChannels
from .models.online_details import OnlineDetails
from .models.online_notify_settings import OnlineNotifySettings
from .models.thumbnail import Thumbnail
from .models.user_data import UserData
from .models.user_email_settings import UserEmailSettings
from .models.video_search_result import VideoSearchResult
from .models.video_search_results import VideoSearchResults
from .models.webhook import Webhook
from .apis.bot_api import BotApi
from .apis.channel_api import ChannelApi
from .apis.multistream_api import MultistreamApi
from .apis.public_api import PublicApi
from .apis.sensitive_api import SensitiveApi
from .apis.user_api import UserApi
from .apis.webhook_api import WebhookApi
from .api_client import ApiClient
from .configuration import Configuration
configuration = Configuration()
<|reserved_special_token_1|>
# coding: utf-8
"""
Picarto.TV API Documentation
The Picarto.TV API documentation Note, for fixed access tokens, the header that needs to be sent is of the format: `Authorization: Bearer yourTokenHere` This can be generated at https://oauth.picarto.tv/ For chat API, see https://docs.picarto.tv/chat/chat.proto - contact via the email below for implementation details
OpenAPI spec version: 1.2.5
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into sdk package
from .models.basic_channel_info import BasicChannelInfo
from .models.basic_follower_info import BasicFollowerInfo
from .models.basic_following_info import BasicFollowingInfo
from .models.categories import Categories
from .models.category import Category
from .models.channel_details import ChannelDetails
from .models.channel_search_results import ChannelSearchResults
from .models.channel_video import ChannelVideo
from .models.channel_videos import ChannelVideos
from .models.description_panel import DescriptionPanel
from .models.event import Event
from .models.events import Events
from .models.language import Language
from .models.languages import Languages
from .models.mobile_notify_settings import MobileNotifySettings
from .models.multi_participant import MultiParticipant
from .models.notification import Notification
from .models.notification_1 import Notification1
from .models.notifications import Notifications
from .models.online_channels import OnlineChannels
from .models.online_details import OnlineDetails
from .models.online_notify_settings import OnlineNotifySettings
from .models.thumbnail import Thumbnail
from .models.user_data import UserData
from .models.user_email_settings import UserEmailSettings
from .models.video_search_result import VideoSearchResult
from .models.video_search_results import VideoSearchResults
from .models.webhook import Webhook
# import apis into sdk package
from .apis.bot_api import BotApi
from .apis.channel_api import ChannelApi
from .apis.multistream_api import MultistreamApi
from .apis.public_api import PublicApi
from .apis.sensitive_api import SensitiveApi
from .apis.user_api import UserApi
from .apis.webhook_api import WebhookApi
# import ApiClient
from .api_client import ApiClient
from .configuration import Configuration
configuration = Configuration()
|
flexible
|
{
"blob_id": "939011fca968d5f9250beb29a0bb700200e637df",
"index": 6274,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nconfiguration = Configuration()\n",
"step-3": "<mask token>\nfrom __future__ import absolute_import\nfrom .models.basic_channel_info import BasicChannelInfo\nfrom .models.basic_follower_info import BasicFollowerInfo\nfrom .models.basic_following_info import BasicFollowingInfo\nfrom .models.categories import Categories\nfrom .models.category import Category\nfrom .models.channel_details import ChannelDetails\nfrom .models.channel_search_results import ChannelSearchResults\nfrom .models.channel_video import ChannelVideo\nfrom .models.channel_videos import ChannelVideos\nfrom .models.description_panel import DescriptionPanel\nfrom .models.event import Event\nfrom .models.events import Events\nfrom .models.language import Language\nfrom .models.languages import Languages\nfrom .models.mobile_notify_settings import MobileNotifySettings\nfrom .models.multi_participant import MultiParticipant\nfrom .models.notification import Notification\nfrom .models.notification_1 import Notification1\nfrom .models.notifications import Notifications\nfrom .models.online_channels import OnlineChannels\nfrom .models.online_details import OnlineDetails\nfrom .models.online_notify_settings import OnlineNotifySettings\nfrom .models.thumbnail import Thumbnail\nfrom .models.user_data import UserData\nfrom .models.user_email_settings import UserEmailSettings\nfrom .models.video_search_result import VideoSearchResult\nfrom .models.video_search_results import VideoSearchResults\nfrom .models.webhook import Webhook\nfrom .apis.bot_api import BotApi\nfrom .apis.channel_api import ChannelApi\nfrom .apis.multistream_api import MultistreamApi\nfrom .apis.public_api import PublicApi\nfrom .apis.sensitive_api import SensitiveApi\nfrom .apis.user_api import UserApi\nfrom .apis.webhook_api import WebhookApi\nfrom .api_client import ApiClient\nfrom .configuration import Configuration\nconfiguration = Configuration()\n",
"step-4": "# coding: utf-8\n\n\"\"\"\n Picarto.TV API Documentation\n\n The Picarto.TV API documentation Note, for fixed access tokens, the header that needs to be sent is of the format: `Authorization: Bearer yourTokenHere` This can be generated at https://oauth.picarto.tv/ For chat API, see https://docs.picarto.tv/chat/chat.proto - contact via the email below for implementation details \n\n OpenAPI spec version: 1.2.5\n Contact: [email protected]\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom __future__ import absolute_import\n\n# import models into sdk package\nfrom .models.basic_channel_info import BasicChannelInfo\nfrom .models.basic_follower_info import BasicFollowerInfo\nfrom .models.basic_following_info import BasicFollowingInfo\nfrom .models.categories import Categories\nfrom .models.category import Category\nfrom .models.channel_details import ChannelDetails\nfrom .models.channel_search_results import ChannelSearchResults\nfrom .models.channel_video import ChannelVideo\nfrom .models.channel_videos import ChannelVideos\nfrom .models.description_panel import DescriptionPanel\nfrom .models.event import Event\nfrom .models.events import Events\nfrom .models.language import Language\nfrom .models.languages import Languages\nfrom .models.mobile_notify_settings import MobileNotifySettings\nfrom .models.multi_participant import MultiParticipant\nfrom .models.notification import Notification\nfrom .models.notification_1 import Notification1\nfrom .models.notifications import Notifications\nfrom .models.online_channels import OnlineChannels\nfrom .models.online_details import OnlineDetails\nfrom .models.online_notify_settings import OnlineNotifySettings\nfrom .models.thumbnail import Thumbnail\nfrom .models.user_data import UserData\nfrom .models.user_email_settings import UserEmailSettings\nfrom .models.video_search_result import VideoSearchResult\nfrom .models.video_search_results import VideoSearchResults\nfrom .models.webhook import Webhook\n\n# import apis into sdk package\nfrom .apis.bot_api import BotApi\nfrom .apis.channel_api import ChannelApi\nfrom .apis.multistream_api import MultistreamApi\nfrom .apis.public_api import PublicApi\nfrom .apis.sensitive_api import SensitiveApi\nfrom .apis.user_api import UserApi\nfrom .apis.webhook_api import WebhookApi\n\n# import ApiClient\nfrom .api_client import ApiClient\n\nfrom .configuration import Configuration\n\nconfiguration = Configuration()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
@_Proveedor.route('/Proveedor', methods=['GET', 'POST'])
def proveedor():
frm = form.Fr_Proveedor(request.form)
if request.method == 'POST':
pr = Proveedor.query.filter_by(CI=frm.CI.data).first()
if frm.validate() and pr is None:
new_user = Proveedor(razonSolcial=frm.RasonSocial.data, CI=frm.
CI.data, Direccion=frm.Direccion.data, Correo=frm.Correo.
data, convencional=frm.Convencional.data, Celular=frm.
Celular.data)
db.session.add(new_user)
db.session.commit()
flash('Se registrado con exito sus datos')
return redirect(url_for('Proveedor.proveedor'))
else:
flash('Error: No se registrado con exito sus Datos')
return render_template('Proveedor/frproveedor.html', frm=frm)
<|reserved_special_token_0|>
@_Proveedor.route('/UpdateP', methods=['POST'])
def UpdateP():
print(request.form)
updateP = Proveedor.query.filter_by(CI=request.form['CI']).first()
print('ci:', updateP.CI)
updateP.razonSolcial = request.form['RasonSocial']
updateP.Direccion = request.form['Direccion']
updateP.Correo = request.form['Correo']
updateP.convencional = request.form['Convencional']
updateP.Celular = request.form['Celular']
db.session.commit()
return redirect(url_for('Proveedor.listaP'))
<|reserved_special_token_0|>
@_Proveedor.route('/modalP')
def modalP():
frm = form.Fr_Proveedor(request.form)
return render_template('modal/modaproveedor.html', frm=frm, title=
'Proveedor')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@_Proveedor.route('/Proveedor', methods=['GET', 'POST'])
def proveedor():
frm = form.Fr_Proveedor(request.form)
if request.method == 'POST':
pr = Proveedor.query.filter_by(CI=frm.CI.data).first()
if frm.validate() and pr is None:
new_user = Proveedor(razonSolcial=frm.RasonSocial.data, CI=frm.
CI.data, Direccion=frm.Direccion.data, Correo=frm.Correo.
data, convencional=frm.Convencional.data, Celular=frm.
Celular.data)
db.session.add(new_user)
db.session.commit()
flash('Se registrado con exito sus datos')
return redirect(url_for('Proveedor.proveedor'))
else:
flash('Error: No se registrado con exito sus Datos')
return render_template('Proveedor/frproveedor.html', frm=frm)
@_Proveedor.route('/listaP')
def listaP():
titulo = 'Lista Proveedor'
return render_template('Proveedor/listaP.html', titulo=titulo, listas=
Proveedor.query.all())
@_Proveedor.route('/UpdateP', methods=['POST'])
def UpdateP():
print(request.form)
updateP = Proveedor.query.filter_by(CI=request.form['CI']).first()
print('ci:', updateP.CI)
updateP.razonSolcial = request.form['RasonSocial']
updateP.Direccion = request.form['Direccion']
updateP.Correo = request.form['Correo']
updateP.convencional = request.form['Convencional']
updateP.Celular = request.form['Celular']
db.session.commit()
return redirect(url_for('Proveedor.listaP'))
@_Proveedor.route('/deleteP/<string:id>', methods=['GET', 'POST'])
def deleteP(id=None):
dlTP = Proveedor.query.filter_by(CI=id).first()
db.session.delete(dlTP)
db.session.commit()
return redirect(url_for('Proveedor.listaP'))
@_Proveedor.route('/modalP')
def modalP():
frm = form.Fr_Proveedor(request.form)
return render_template('modal/modaproveedor.html', frm=frm, title=
'Proveedor')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
_Proveedor = Blueprint('Proveedor', __name__, url_prefix='/Proveedor')
@_Proveedor.route('/Proveedor', methods=['GET', 'POST'])
def proveedor():
frm = form.Fr_Proveedor(request.form)
if request.method == 'POST':
pr = Proveedor.query.filter_by(CI=frm.CI.data).first()
if frm.validate() and pr is None:
new_user = Proveedor(razonSolcial=frm.RasonSocial.data, CI=frm.
CI.data, Direccion=frm.Direccion.data, Correo=frm.Correo.
data, convencional=frm.Convencional.data, Celular=frm.
Celular.data)
db.session.add(new_user)
db.session.commit()
flash('Se registrado con exito sus datos')
return redirect(url_for('Proveedor.proveedor'))
else:
flash('Error: No se registrado con exito sus Datos')
return render_template('Proveedor/frproveedor.html', frm=frm)
@_Proveedor.route('/listaP')
def listaP():
titulo = 'Lista Proveedor'
return render_template('Proveedor/listaP.html', titulo=titulo, listas=
Proveedor.query.all())
@_Proveedor.route('/UpdateP', methods=['POST'])
def UpdateP():
print(request.form)
updateP = Proveedor.query.filter_by(CI=request.form['CI']).first()
print('ci:', updateP.CI)
updateP.razonSolcial = request.form['RasonSocial']
updateP.Direccion = request.form['Direccion']
updateP.Correo = request.form['Correo']
updateP.convencional = request.form['Convencional']
updateP.Celular = request.form['Celular']
db.session.commit()
return redirect(url_for('Proveedor.listaP'))
@_Proveedor.route('/deleteP/<string:id>', methods=['GET', 'POST'])
def deleteP(id=None):
dlTP = Proveedor.query.filter_by(CI=id).first()
db.session.delete(dlTP)
db.session.commit()
return redirect(url_for('Proveedor.listaP'))
@_Proveedor.route('/modalP')
def modalP():
frm = form.Fr_Proveedor(request.form)
return render_template('modal/modaproveedor.html', frm=frm, title=
'Proveedor')
<|reserved_special_token_1|>
from flask import Blueprint, Flask, render_template, request, redirect, url_for, flash
from App import db
from App.Modulos.Proveedor.model import Proveedor
from App.Modulos.Proveedor import form
_Proveedor = Blueprint('Proveedor', __name__, url_prefix='/Proveedor')
@_Proveedor.route('/Proveedor', methods=['GET', 'POST'])
def proveedor():
frm = form.Fr_Proveedor(request.form)
if request.method == 'POST':
pr = Proveedor.query.filter_by(CI=frm.CI.data).first()
if frm.validate() and pr is None:
new_user = Proveedor(razonSolcial=frm.RasonSocial.data, CI=frm.
CI.data, Direccion=frm.Direccion.data, Correo=frm.Correo.
data, convencional=frm.Convencional.data, Celular=frm.
Celular.data)
db.session.add(new_user)
db.session.commit()
flash('Se registrado con exito sus datos')
return redirect(url_for('Proveedor.proveedor'))
else:
flash('Error: No se registrado con exito sus Datos')
return render_template('Proveedor/frproveedor.html', frm=frm)
@_Proveedor.route('/listaP')
def listaP():
titulo = 'Lista Proveedor'
return render_template('Proveedor/listaP.html', titulo=titulo, listas=
Proveedor.query.all())
@_Proveedor.route('/UpdateP', methods=['POST'])
def UpdateP():
print(request.form)
updateP = Proveedor.query.filter_by(CI=request.form['CI']).first()
print('ci:', updateP.CI)
updateP.razonSolcial = request.form['RasonSocial']
updateP.Direccion = request.form['Direccion']
updateP.Correo = request.form['Correo']
updateP.convencional = request.form['Convencional']
updateP.Celular = request.form['Celular']
db.session.commit()
return redirect(url_for('Proveedor.listaP'))
@_Proveedor.route('/deleteP/<string:id>', methods=['GET', 'POST'])
def deleteP(id=None):
dlTP = Proveedor.query.filter_by(CI=id).first()
db.session.delete(dlTP)
db.session.commit()
return redirect(url_for('Proveedor.listaP'))
@_Proveedor.route('/modalP')
def modalP():
frm = form.Fr_Proveedor(request.form)
return render_template('modal/modaproveedor.html', frm=frm, title=
'Proveedor')
<|reserved_special_token_1|>
# Flask dependency imports
from flask import Blueprint,Flask, render_template, request,redirect,url_for,flash
# database model binding
from App import db
# import of the model module (ModeloCliente)
from App.Modulos.Proveedor.model import Proveedor
# import of the formularioCliente form module
from App.Modulos.Proveedor import form
_Proveedor=Blueprint('Proveedor',__name__,url_prefix='/Proveedor')
@_Proveedor.route('/Proveedor', methods=['GET', 'POST']) # supplier registration
def proveedor():
frm = form.Fr_Proveedor(request.form)
if request.method == 'POST':
pr = Proveedor.query.filter_by(CI=frm.CI.data).first()
if frm.validate() and pr is None:
new_user = Proveedor(razonSolcial=frm.RasonSocial.data,
CI=frm.CI.data,
Direccion=frm.Direccion.data,
Correo=frm.Correo.data,
convencional=frm.Convencional.data,
Celular=frm.Celular.data
)
db.session.add(new_user)
db.session.commit()
flash("Se registrado con exito sus datos")
return redirect(url_for('Proveedor.proveedor'))
else:
flash("Error: No se registrado con exito sus Datos")
return render_template('Proveedor/frproveedor.html', frm=frm)
@_Proveedor.route('/listaP') # list of suppliers.
def listaP():
titulo = "Lista Proveedor"
return render_template("Proveedor/listaP.html", titulo=titulo, listas=Proveedor.query.all())
@_Proveedor.route('/UpdateP', methods=[ 'POST'])
def UpdateP():
print(request.form)
updateP = Proveedor.query.filter_by(CI=request.form['CI']).first()
print("ci:",updateP.CI)
updateP.razonSolcial = request.form['RasonSocial']
updateP.Direccion = request.form['Direccion']
updateP.Correo = request.form['Correo']
updateP.convencional= request.form['Convencional']
updateP.Celular = request.form['Celular']
db.session.commit()
return redirect(url_for('Proveedor.listaP'))
@_Proveedor.route('/deleteP/<string:id>',methods=['GET','POST'])
def deleteP(id=None):
dlTP = Proveedor.query.filter_by(CI=id).first()
db.session.delete(dlTP)
db.session.commit()
return redirect(url_for('Proveedor.listaP'))
@_Proveedor.route("/modalP")
def modalP():
frm = form.Fr_Proveedor(request.form)
return render_template("modal/modaproveedor.html", frm=frm, title="Proveedor")
|
flexible
|
{
"blob_id": "99ecb927e22bc303dd9dffd2793887e7398dbb83",
"index": 3649,
"step-1": "<mask token>\n\n\n@_Proveedor.route('/Proveedor', methods=['GET', 'POST'])\ndef proveedor():\n frm = form.Fr_Proveedor(request.form)\n if request.method == 'POST':\n pr = Proveedor.query.filter_by(CI=frm.CI.data).first()\n if frm.validate() and pr is None:\n new_user = Proveedor(razonSolcial=frm.RasonSocial.data, CI=frm.\n CI.data, Direccion=frm.Direccion.data, Correo=frm.Correo.\n data, convencional=frm.Convencional.data, Celular=frm.\n Celular.data)\n db.session.add(new_user)\n db.session.commit()\n flash('Se registrado con exito sus datos')\n return redirect(url_for('Proveedor.proveedor'))\n else:\n flash('Error: No se registrado con exito sus Datos')\n return render_template('Proveedor/frproveedor.html', frm=frm)\n\n\n<mask token>\n\n\n@_Proveedor.route('/UpdateP', methods=['POST'])\ndef UpdateP():\n print(request.form)\n updateP = Proveedor.query.filter_by(CI=request.form['CI']).first()\n print('ci:', updateP.CI)\n updateP.razonSolcial = request.form['RasonSocial']\n updateP.Direccion = request.form['Direccion']\n updateP.Correo = request.form['Correo']\n updateP.convencional = request.form['Convencional']\n updateP.Celular = request.form['Celular']\n db.session.commit()\n return redirect(url_for('Proveedor.listaP'))\n\n\n<mask token>\n\n\n@_Proveedor.route('/modalP')\ndef modalP():\n frm = form.Fr_Proveedor(request.form)\n return render_template('modal/modaproveedor.html', frm=frm, title=\n 'Proveedor')\n",
"step-2": "<mask token>\n\n\n@_Proveedor.route('/Proveedor', methods=['GET', 'POST'])\ndef proveedor():\n frm = form.Fr_Proveedor(request.form)\n if request.method == 'POST':\n pr = Proveedor.query.filter_by(CI=frm.CI.data).first()\n if frm.validate() and pr is None:\n new_user = Proveedor(razonSolcial=frm.RasonSocial.data, CI=frm.\n CI.data, Direccion=frm.Direccion.data, Correo=frm.Correo.\n data, convencional=frm.Convencional.data, Celular=frm.\n Celular.data)\n db.session.add(new_user)\n db.session.commit()\n flash('Se registrado con exito sus datos')\n return redirect(url_for('Proveedor.proveedor'))\n else:\n flash('Error: No se registrado con exito sus Datos')\n return render_template('Proveedor/frproveedor.html', frm=frm)\n\n\n@_Proveedor.route('/listaP')\ndef listaP():\n titulo = 'Lista Proveedor'\n return render_template('Proveedor/listaP.html', titulo=titulo, listas=\n Proveedor.query.all())\n\n\n@_Proveedor.route('/UpdateP', methods=['POST'])\ndef UpdateP():\n print(request.form)\n updateP = Proveedor.query.filter_by(CI=request.form['CI']).first()\n print('ci:', updateP.CI)\n updateP.razonSolcial = request.form['RasonSocial']\n updateP.Direccion = request.form['Direccion']\n updateP.Correo = request.form['Correo']\n updateP.convencional = request.form['Convencional']\n updateP.Celular = request.form['Celular']\n db.session.commit()\n return redirect(url_for('Proveedor.listaP'))\n\n\n@_Proveedor.route('/deleteP/<string:id>', methods=['GET', 'POST'])\ndef deleteP(id=None):\n dlTP = Proveedor.query.filter_by(CI=id).first()\n db.session.delete(dlTP)\n db.session.commit()\n return redirect(url_for('Proveedor.listaP'))\n\n\n@_Proveedor.route('/modalP')\ndef modalP():\n frm = form.Fr_Proveedor(request.form)\n return render_template('modal/modaproveedor.html', frm=frm, title=\n 'Proveedor')\n",
"step-3": "<mask token>\n_Proveedor = Blueprint('Proveedor', __name__, url_prefix='/Proveedor')\n\n\n@_Proveedor.route('/Proveedor', methods=['GET', 'POST'])\ndef proveedor():\n frm = form.Fr_Proveedor(request.form)\n if request.method == 'POST':\n pr = Proveedor.query.filter_by(CI=frm.CI.data).first()\n if frm.validate() and pr is None:\n new_user = Proveedor(razonSolcial=frm.RasonSocial.data, CI=frm.\n CI.data, Direccion=frm.Direccion.data, Correo=frm.Correo.\n data, convencional=frm.Convencional.data, Celular=frm.\n Celular.data)\n db.session.add(new_user)\n db.session.commit()\n flash('Se registrado con exito sus datos')\n return redirect(url_for('Proveedor.proveedor'))\n else:\n flash('Error: No se registrado con exito sus Datos')\n return render_template('Proveedor/frproveedor.html', frm=frm)\n\n\n@_Proveedor.route('/listaP')\ndef listaP():\n titulo = 'Lista Proveedor'\n return render_template('Proveedor/listaP.html', titulo=titulo, listas=\n Proveedor.query.all())\n\n\n@_Proveedor.route('/UpdateP', methods=['POST'])\ndef UpdateP():\n print(request.form)\n updateP = Proveedor.query.filter_by(CI=request.form['CI']).first()\n print('ci:', updateP.CI)\n updateP.razonSolcial = request.form['RasonSocial']\n updateP.Direccion = request.form['Direccion']\n updateP.Correo = request.form['Correo']\n updateP.convencional = request.form['Convencional']\n updateP.Celular = request.form['Celular']\n db.session.commit()\n return redirect(url_for('Proveedor.listaP'))\n\n\n@_Proveedor.route('/deleteP/<string:id>', methods=['GET', 'POST'])\ndef deleteP(id=None):\n dlTP = Proveedor.query.filter_by(CI=id).first()\n db.session.delete(dlTP)\n db.session.commit()\n return redirect(url_for('Proveedor.listaP'))\n\n\n@_Proveedor.route('/modalP')\ndef modalP():\n frm = form.Fr_Proveedor(request.form)\n return render_template('modal/modaproveedor.html', frm=frm, title=\n 'Proveedor')\n",
"step-4": "from flask import Blueprint, Flask, render_template, request, redirect, url_for, flash\nfrom App import db\nfrom App.Modulos.Proveedor.model import Proveedor\nfrom App.Modulos.Proveedor import form\n_Proveedor = Blueprint('Proveedor', __name__, url_prefix='/Proveedor')\n\n\n@_Proveedor.route('/Proveedor', methods=['GET', 'POST'])\ndef proveedor():\n frm = form.Fr_Proveedor(request.form)\n if request.method == 'POST':\n pr = Proveedor.query.filter_by(CI=frm.CI.data).first()\n if frm.validate() and pr is None:\n new_user = Proveedor(razonSolcial=frm.RasonSocial.data, CI=frm.\n CI.data, Direccion=frm.Direccion.data, Correo=frm.Correo.\n data, convencional=frm.Convencional.data, Celular=frm.\n Celular.data)\n db.session.add(new_user)\n db.session.commit()\n flash('Se registrado con exito sus datos')\n return redirect(url_for('Proveedor.proveedor'))\n else:\n flash('Error: No se registrado con exito sus Datos')\n return render_template('Proveedor/frproveedor.html', frm=frm)\n\n\n@_Proveedor.route('/listaP')\ndef listaP():\n titulo = 'Lista Proveedor'\n return render_template('Proveedor/listaP.html', titulo=titulo, listas=\n Proveedor.query.all())\n\n\n@_Proveedor.route('/UpdateP', methods=['POST'])\ndef UpdateP():\n print(request.form)\n updateP = Proveedor.query.filter_by(CI=request.form['CI']).first()\n print('ci:', updateP.CI)\n updateP.razonSolcial = request.form['RasonSocial']\n updateP.Direccion = request.form['Direccion']\n updateP.Correo = request.form['Correo']\n updateP.convencional = request.form['Convencional']\n updateP.Celular = request.form['Celular']\n db.session.commit()\n return redirect(url_for('Proveedor.listaP'))\n\n\n@_Proveedor.route('/deleteP/<string:id>', methods=['GET', 'POST'])\ndef deleteP(id=None):\n dlTP = Proveedor.query.filter_by(CI=id).first()\n db.session.delete(dlTP)\n db.session.commit()\n return redirect(url_for('Proveedor.listaP'))\n\n\n@_Proveedor.route('/modalP')\ndef modalP():\n frm = form.Fr_Proveedor(request.form)\n return render_template('modal/modaproveedor.html', frm=frm, title=\n 'Proveedor')\n",
"step-5": "#Importacion de Dependencias Flask\nfrom flask import Blueprint,Flask, render_template, request,redirect,url_for,flash\n#modelado de basedato.\nfrom App import db\n# Importacion de modulo de ModeloCliente\nfrom App.Modulos.Proveedor.model import Proveedor\n#Inportacion de modulo de formularioCliente\nfrom App.Modulos.Proveedor import form\n\n_Proveedor=Blueprint('Proveedor',__name__,url_prefix='/Proveedor')\n\n@_Proveedor.route('/Proveedor', methods=['GET', 'POST']) # registro de proveedor\ndef proveedor():\n frm = form.Fr_Proveedor(request.form)\n if request.method == 'POST':\n pr = Proveedor.query.filter_by(CI=frm.CI.data).first()\n if frm.validate() and pr is None:\n new_user = Proveedor(razonSolcial=frm.RasonSocial.data,\n CI=frm.CI.data,\n Direccion=frm.Direccion.data,\n Correo=frm.Correo.data,\n convencional=frm.Convencional.data,\n Celular=frm.Celular.data\n )\n db.session.add(new_user)\n db.session.commit()\n flash(\"Se registrado con exito sus datos\")\n return redirect(url_for('Proveedor.proveedor'))\n else:\n flash(\"Error: No se registrado con exito sus Datos\")\n return render_template('Proveedor/frproveedor.html', frm=frm)\n\n@_Proveedor.route('/listaP') # listado de Proveedores.\ndef listaP():\n titulo = \"Lista Proveedor\"\n return render_template(\"Proveedor/listaP.html\", titulo=titulo, listas=Proveedor.query.all())\n\n@_Proveedor.route('/UpdateP', methods=[ 'POST'])\ndef UpdateP():\n print(request.form)\n\n updateP = Proveedor.query.filter_by(CI=request.form['CI']).first()\n print(\"ci:\",updateP.CI)\n updateP.razonSolcial = request.form['RasonSocial']\n updateP.Direccion = request.form['Direccion']\n updateP.Correo = request.form['Correo']\n updateP.convencional= request.form['Convencional']\n updateP.Celular = request.form['Celular']\n db.session.commit()\n return redirect(url_for('Proveedor.listaP'))\n\n\n@_Proveedor.route('/deleteP/<string:id>',methods=['GET','POST'])\ndef deleteP(id=None):\n dlTP = Proveedor.query.filter_by(CI=id).first()\n db.session.delete(dlTP) \n db.session.commit()\n return redirect(url_for('Proveedor.listaP'))\n\n@_Proveedor.route(\"/modalP\")\ndef modalP():\n frm = form.Fr_Proveedor(request.form)\n return render_template(\"modal/modaproveedor.html\", frm=frm, title=\"Proveedor\")\n\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class LinearRegression:
def __init__(self):
self.coef_ = None
self.interception_ = None
self._theta = None
<|reserved_special_token_0|>
def fit_gd(self, X_train, y_train, eta=0.01, n_iter=10000.0):
assert X_train.shape[0] == y_train.shape[0], ''
def J(theta, X_b, y):
try:
return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)
except:
return float('inf')
def dJ(theta, X_b, y):
return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)
def gradient_descent(X_b, y, initial_theta, eta, n_iter, epsilon=1e-08
):
theta = initial_theta
i_iter = 0
while i_iter < n_iter:
gradient = dJ(theta, X_b, y)
last_theta = theta
theta = theta - eta * gradient
if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:
break
i_iter += 1
return theta
X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
initial_theta = np.zeros(X_b.shape[1])
self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iter
)
self.interception_ = self._theta[0]
self.coef_ = self._theta[1:]
return self
def fit_sgd(self, X_train, y_train, n_iter=5, t0=5, t1=50):
assert X_train.shape[0] == y_train.shape[0], ''
def dJ_sgd(theta, X_b_i, y_i):
return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2
def sgd(X_b, y, initial_theta, n_iter, t0=5, t1=50):
def learning_rate(t):
return t0 / (t + t1)
theta = initial_theta
m = len(X_b)
for curr_iter in range(n_iter):
indexes = np.random.permutation(m)
X_b_new = X_b[indexes]
y_new = y[indexes]
for i in range(m):
gradient = dJ_sgd(theta, X_b_new[i], y_new[i])
theta = theta - learning_rate(curr_iter * m + i) * gradient
return theta
X_b = np.hstack([np.ones([len(X_train), 1]), X_train])
initial_theta = np.zeros(X_b.shape[1])
self._theta = sgd(X_b, y_train, initial_theta, n_iter)
self.interception_ = self._theta[0]
self.coef_ = self._theta[1:]
def predict(self, X_predict):
assert self.interception_ is not None and self.coef_ is not None, 'must fit before predict'
assert X_predict.shape[1] == len(self.coef_
), 'the feature number of X_predict must be equal to X_train'
X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])
y_predict = X_b.dot(self._theta)
return y_predict
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LinearRegression:
def __init__(self):
self.coef_ = None
self.interception_ = None
self._theta = None
<|reserved_special_token_0|>
def fit_gd(self, X_train, y_train, eta=0.01, n_iter=10000.0):
assert X_train.shape[0] == y_train.shape[0], ''
def J(theta, X_b, y):
try:
return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)
except:
return float('inf')
def dJ(theta, X_b, y):
return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)
def gradient_descent(X_b, y, initial_theta, eta, n_iter, epsilon=1e-08
):
theta = initial_theta
i_iter = 0
while i_iter < n_iter:
gradient = dJ(theta, X_b, y)
last_theta = theta
theta = theta - eta * gradient
if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:
break
i_iter += 1
return theta
X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
initial_theta = np.zeros(X_b.shape[1])
self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iter
)
self.interception_ = self._theta[0]
self.coef_ = self._theta[1:]
return self
def fit_sgd(self, X_train, y_train, n_iter=5, t0=5, t1=50):
assert X_train.shape[0] == y_train.shape[0], ''
def dJ_sgd(theta, X_b_i, y_i):
return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2
def sgd(X_b, y, initial_theta, n_iter, t0=5, t1=50):
def learning_rate(t):
return t0 / (t + t1)
theta = initial_theta
m = len(X_b)
for curr_iter in range(n_iter):
indexes = np.random.permutation(m)
X_b_new = X_b[indexes]
y_new = y[indexes]
for i in range(m):
gradient = dJ_sgd(theta, X_b_new[i], y_new[i])
theta = theta - learning_rate(curr_iter * m + i) * gradient
return theta
X_b = np.hstack([np.ones([len(X_train), 1]), X_train])
initial_theta = np.zeros(X_b.shape[1])
self._theta = sgd(X_b, y_train, initial_theta, n_iter)
self.interception_ = self._theta[0]
self.coef_ = self._theta[1:]
def predict(self, X_predict):
assert self.interception_ is not None and self.coef_ is not None, 'must fit before predict'
assert X_predict.shape[1] == len(self.coef_
), 'the feature number of X_predict must be equal to X_train'
X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])
y_predict = X_b.dot(self._theta)
return y_predict
def score(self, X_test, y_test):
y_predict = self.predict(X_test)
return r2_score(y_test, y_predict)
def __repr__(self):
return 'LinearRegression()'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LinearRegression:
def __init__(self):
self.coef_ = None
self.interception_ = None
self._theta = None
def fit_normal(self, X_train, y_train):
assert X_train.shape[0] == y_train.shape[0], ''
X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)
self.interception_ = self._theta[0]
self.coef_ = self._theta[1:]
return self
def fit_gd(self, X_train, y_train, eta=0.01, n_iter=10000.0):
assert X_train.shape[0] == y_train.shape[0], ''
def J(theta, X_b, y):
try:
return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)
except:
return float('inf')
def dJ(theta, X_b, y):
return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)
def gradient_descent(X_b, y, initial_theta, eta, n_iter, epsilon=1e-08
):
theta = initial_theta
i_iter = 0
while i_iter < n_iter:
gradient = dJ(theta, X_b, y)
last_theta = theta
theta = theta - eta * gradient
if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:
break
i_iter += 1
return theta
X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
initial_theta = np.zeros(X_b.shape[1])
self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iter
)
self.interception_ = self._theta[0]
self.coef_ = self._theta[1:]
return self
def fit_sgd(self, X_train, y_train, n_iter=5, t0=5, t1=50):
assert X_train.shape[0] == y_train.shape[0], ''
def dJ_sgd(theta, X_b_i, y_i):
return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2
def sgd(X_b, y, initial_theta, n_iter, t0=5, t1=50):
def learning_rate(t):
return t0 / (t + t1)
theta = initial_theta
m = len(X_b)
for curr_iter in range(n_iter):
indexes = np.random.permutation(m)
X_b_new = X_b[indexes]
y_new = y[indexes]
for i in range(m):
gradient = dJ_sgd(theta, X_b_new[i], y_new[i])
theta = theta - learning_rate(curr_iter * m + i) * gradient
return theta
X_b = np.hstack([np.ones([len(X_train), 1]), X_train])
initial_theta = np.zeros(X_b.shape[1])
self._theta = sgd(X_b, y_train, initial_theta, n_iter)
self.interception_ = self._theta[0]
self.coef_ = self._theta[1:]
def predict(self, X_predict):
assert self.interception_ is not None and self.coef_ is not None, 'must fit before predict'
assert X_predict.shape[1] == len(self.coef_
), 'the feature number of X_predict must be equal to X_train'
X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])
y_predict = X_b.dot(self._theta)
return y_predict
def score(self, X_test, y_test):
y_predict = self.predict(X_test)
return r2_score(y_test, y_predict)
def __repr__(self):
return 'LinearRegression()'
<|reserved_special_token_1|>
import numpy as np
from .metrics import r2_score
class LinearRegression:
def __init__(self):
self.coef_ = None
self.interception_ = None
self._theta = None
def fit_normal(self, X_train, y_train):
assert X_train.shape[0] == y_train.shape[0], ''
X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)
self.interception_ = self._theta[0]
self.coef_ = self._theta[1:]
return self
def fit_gd(self, X_train, y_train, eta=0.01, n_iter=10000.0):
assert X_train.shape[0] == y_train.shape[0], ''
def J(theta, X_b, y):
try:
return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)
except:
return float('inf')
def dJ(theta, X_b, y):
return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)
def gradient_descent(X_b, y, initial_theta, eta, n_iter, epsilon=1e-08
):
theta = initial_theta
i_iter = 0
while i_iter < n_iter:
gradient = dJ(theta, X_b, y)
last_theta = theta
theta = theta - eta * gradient
if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:
break
i_iter += 1
return theta
X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
initial_theta = np.zeros(X_b.shape[1])
self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iter
)
self.interception_ = self._theta[0]
self.coef_ = self._theta[1:]
return self
def fit_sgd(self, X_train, y_train, n_iter=5, t0=5, t1=50):
assert X_train.shape[0] == y_train.shape[0], ''
def dJ_sgd(theta, X_b_i, y_i):
return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2
def sgd(X_b, y, initial_theta, n_iter, t0=5, t1=50):
def learning_rate(t):
return t0 / (t + t1)
theta = initial_theta
m = len(X_b)
for curr_iter in range(n_iter):
indexes = np.random.permutation(m)
X_b_new = X_b[indexes]
y_new = y[indexes]
for i in range(m):
gradient = dJ_sgd(theta, X_b_new[i], y_new[i])
theta = theta - learning_rate(curr_iter * m + i) * gradient
return theta
X_b = np.hstack([np.ones([len(X_train), 1]), X_train])
initial_theta = np.zeros(X_b.shape[1])
self._theta = sgd(X_b, y_train, initial_theta, n_iter)
self.interception_ = self._theta[0]
self.coef_ = self._theta[1:]
def predict(self, X_predict):
assert self.interception_ is not None and self.coef_ is not None, 'must fit before predict'
assert X_predict.shape[1] == len(self.coef_
), 'the feature number of X_predict must be equal to X_train'
X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])
y_predict = X_b.dot(self._theta)
return y_predict
def score(self, X_test, y_test):
y_predict = self.predict(X_test)
return r2_score(y_test, y_predict)
def __repr__(self):
return 'LinearRegression()'
<|reserved_special_token_1|>
import numpy as np
from .metrics import r2_score
class LinearRegression:
def __init__(self):
        self.coef_ = None # coefficients
        self.interception_ = None # intercept
self._theta = None
def fit_normal(self, X_train, y_train):
assert X_train.shape[0] == y_train.shape[0], ""
#!!!important
X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)
self.interception_ = self._theta[0]
self.coef_ = self._theta[1:]
return self
def fit_gd(self, X_train, y_train, eta=0.01, n_iter=1e4):
assert X_train.shape[0] == y_train.shape[0], ""
def J(theta, X_b, y):
try:
return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)
except:
return float('inf')
def dJ(theta, X_b, y):
            # vectorized implementation
return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)
def gradient_descent(X_b, y, initial_theta, eta, n_iter, epsilon=1e-8):
theta = initial_theta
i_iter = 0
while i_iter < n_iter:
gradient = dJ(theta, X_b, y)
last_theta = theta
theta = theta - eta * gradient
if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):
break
i_iter += 1
return theta
X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
initial_theta = np.zeros(X_b.shape[1])
self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iter)
self.interception_ = self._theta[0]
self.coef_ = self._theta[1:]
return self
    # n_iter is the number of passes over the entire training data
def fit_sgd(self, X_train, y_train, n_iter=5, t0=5, t1=50):
assert X_train.shape[0] == y_train.shape[0], ""
def dJ_sgd(theta, X_b_i, y_i):
return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2
# Stochastic gradient descent
def sgd(X_b, y, initial_theta, n_iter, t0=5, t1=50):
def learning_rate(t):
return t0 / (t + t1)
theta = initial_theta
m = len(X_b)
for curr_iter in range(n_iter):
indexes = np.random.permutation(m)
X_b_new = X_b[indexes]
y_new = y[indexes]
for i in range(m):
gradient = dJ_sgd(theta, X_b_new[i], y_new[i])
theta = theta - learning_rate(curr_iter * m + i) * gradient
return theta
X_b = np.hstack([np.ones([len(X_train), 1]), X_train])
initial_theta = np.zeros(X_b.shape[1])
self._theta = sgd(X_b, y_train, initial_theta, n_iter)
self.interception_ = self._theta[0]
self.coef_ = self._theta[1:]
def predict(self,X_predict):
assert self.interception_ is not None and self.coef_ is not None,\
"must fit before predict"
assert X_predict.shape[1] == len(self.coef_),\
"the feature number of X_predict must be equal to X_train"
X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])
y_predict = X_b.dot(self._theta)
return y_predict
def score(self,X_test,y_test):
y_predict = self.predict(X_test)
return r2_score(y_test,y_predict)
def __repr__(self):
return "LinearRegression()"
|
flexible
|
{
"blob_id": "e47e614c88c78fb6e8ff4098ea2b89d21bfa9684",
"index": 6935,
"step-1": "<mask token>\n\n\nclass LinearRegression:\n\n def __init__(self):\n self.coef_ = None\n self.interception_ = None\n self._theta = None\n <mask token>\n\n def fit_gd(self, X_train, y_train, eta=0.01, n_iter=10000.0):\n assert X_train.shape[0] == y_train.shape[0], ''\n\n def J(theta, X_b, y):\n try:\n return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)\n except:\n return float('inf')\n\n def dJ(theta, X_b, y):\n return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)\n\n def gradient_descent(X_b, y, initial_theta, eta, n_iter, epsilon=1e-08\n ):\n theta = initial_theta\n i_iter = 0\n while i_iter < n_iter:\n gradient = dJ(theta, X_b, y)\n last_theta = theta\n theta = theta - eta * gradient\n if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:\n break\n i_iter += 1\n return theta\n X_b = np.hstack([np.ones((len(X_train), 1)), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iter\n )\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n return self\n\n def fit_sgd(self, X_train, y_train, n_iter=5, t0=5, t1=50):\n assert X_train.shape[0] == y_train.shape[0], ''\n\n def dJ_sgd(theta, X_b_i, y_i):\n return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2\n\n def sgd(X_b, y, initial_theta, n_iter, t0=5, t1=50):\n\n def learning_rate(t):\n return t0 / (t + t1)\n theta = initial_theta\n m = len(X_b)\n for curr_iter in range(n_iter):\n indexes = np.random.permutation(m)\n X_b_new = X_b[indexes]\n y_new = y[indexes]\n for i in range(m):\n gradient = dJ_sgd(theta, X_b_new[i], y_new[i])\n theta = theta - learning_rate(curr_iter * m + i) * gradient\n return theta\n X_b = np.hstack([np.ones([len(X_train), 1]), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = sgd(X_b, y_train, initial_theta, n_iter)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n\n def predict(self, X_predict):\n assert self.interception_ is not None and self.coef_ is not None, 'must fit before predict'\n assert X_predict.shape[1] == len(self.coef_\n ), 'the feature number of X_predict must be equal to X_train'\n X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])\n y_predict = X_b.dot(self._theta)\n return y_predict\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass LinearRegression:\n\n def __init__(self):\n self.coef_ = None\n self.interception_ = None\n self._theta = None\n <mask token>\n\n def fit_gd(self, X_train, y_train, eta=0.01, n_iter=10000.0):\n assert X_train.shape[0] == y_train.shape[0], ''\n\n def J(theta, X_b, y):\n try:\n return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)\n except:\n return float('inf')\n\n def dJ(theta, X_b, y):\n return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)\n\n def gradient_descent(X_b, y, initial_theta, eta, n_iter, epsilon=1e-08\n ):\n theta = initial_theta\n i_iter = 0\n while i_iter < n_iter:\n gradient = dJ(theta, X_b, y)\n last_theta = theta\n theta = theta - eta * gradient\n if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:\n break\n i_iter += 1\n return theta\n X_b = np.hstack([np.ones((len(X_train), 1)), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iter\n )\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n return self\n\n def fit_sgd(self, X_train, y_train, n_iter=5, t0=5, t1=50):\n assert X_train.shape[0] == y_train.shape[0], ''\n\n def dJ_sgd(theta, X_b_i, y_i):\n return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2\n\n def sgd(X_b, y, initial_theta, n_iter, t0=5, t1=50):\n\n def learning_rate(t):\n return t0 / (t + t1)\n theta = initial_theta\n m = len(X_b)\n for curr_iter in range(n_iter):\n indexes = np.random.permutation(m)\n X_b_new = X_b[indexes]\n y_new = y[indexes]\n for i in range(m):\n gradient = dJ_sgd(theta, X_b_new[i], y_new[i])\n theta = theta - learning_rate(curr_iter * m + i) * gradient\n return theta\n X_b = np.hstack([np.ones([len(X_train), 1]), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = sgd(X_b, y_train, initial_theta, n_iter)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n\n def predict(self, X_predict):\n assert self.interception_ is not None and self.coef_ is not None, 'must fit before predict'\n assert X_predict.shape[1] == len(self.coef_\n ), 'the feature number of X_predict must be equal to X_train'\n X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])\n y_predict = X_b.dot(self._theta)\n return y_predict\n\n def score(self, X_test, y_test):\n y_predict = self.predict(X_test)\n return r2_score(y_test, y_predict)\n\n def __repr__(self):\n return 'LinearRegression()'\n",
"step-3": "<mask token>\n\n\nclass LinearRegression:\n\n def __init__(self):\n self.coef_ = None\n self.interception_ = None\n self._theta = None\n\n def fit_normal(self, X_train, y_train):\n assert X_train.shape[0] == y_train.shape[0], ''\n X_b = np.hstack([np.ones((len(X_train), 1)), X_train])\n self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n return self\n\n def fit_gd(self, X_train, y_train, eta=0.01, n_iter=10000.0):\n assert X_train.shape[0] == y_train.shape[0], ''\n\n def J(theta, X_b, y):\n try:\n return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)\n except:\n return float('inf')\n\n def dJ(theta, X_b, y):\n return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)\n\n def gradient_descent(X_b, y, initial_theta, eta, n_iter, epsilon=1e-08\n ):\n theta = initial_theta\n i_iter = 0\n while i_iter < n_iter:\n gradient = dJ(theta, X_b, y)\n last_theta = theta\n theta = theta - eta * gradient\n if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:\n break\n i_iter += 1\n return theta\n X_b = np.hstack([np.ones((len(X_train), 1)), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iter\n )\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n return self\n\n def fit_sgd(self, X_train, y_train, n_iter=5, t0=5, t1=50):\n assert X_train.shape[0] == y_train.shape[0], ''\n\n def dJ_sgd(theta, X_b_i, y_i):\n return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2\n\n def sgd(X_b, y, initial_theta, n_iter, t0=5, t1=50):\n\n def learning_rate(t):\n return t0 / (t + t1)\n theta = initial_theta\n m = len(X_b)\n for curr_iter in range(n_iter):\n indexes = np.random.permutation(m)\n X_b_new = X_b[indexes]\n y_new = y[indexes]\n for i in range(m):\n gradient = dJ_sgd(theta, X_b_new[i], y_new[i])\n theta = theta - learning_rate(curr_iter * m + i) * gradient\n return theta\n X_b = np.hstack([np.ones([len(X_train), 1]), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = sgd(X_b, y_train, initial_theta, n_iter)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n\n def predict(self, X_predict):\n assert self.interception_ is not None and self.coef_ is not None, 'must fit before predict'\n assert X_predict.shape[1] == len(self.coef_\n ), 'the feature number of X_predict must be equal to X_train'\n X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])\n y_predict = X_b.dot(self._theta)\n return y_predict\n\n def score(self, X_test, y_test):\n y_predict = self.predict(X_test)\n return r2_score(y_test, y_predict)\n\n def __repr__(self):\n return 'LinearRegression()'\n",
"step-4": "import numpy as np\nfrom .metrics import r2_score\n\n\nclass LinearRegression:\n\n def __init__(self):\n self.coef_ = None\n self.interception_ = None\n self._theta = None\n\n def fit_normal(self, X_train, y_train):\n assert X_train.shape[0] == y_train.shape[0], ''\n X_b = np.hstack([np.ones((len(X_train), 1)), X_train])\n self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n return self\n\n def fit_gd(self, X_train, y_train, eta=0.01, n_iter=10000.0):\n assert X_train.shape[0] == y_train.shape[0], ''\n\n def J(theta, X_b, y):\n try:\n return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)\n except:\n return float('inf')\n\n def dJ(theta, X_b, y):\n return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)\n\n def gradient_descent(X_b, y, initial_theta, eta, n_iter, epsilon=1e-08\n ):\n theta = initial_theta\n i_iter = 0\n while i_iter < n_iter:\n gradient = dJ(theta, X_b, y)\n last_theta = theta\n theta = theta - eta * gradient\n if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:\n break\n i_iter += 1\n return theta\n X_b = np.hstack([np.ones((len(X_train), 1)), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iter\n )\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n return self\n\n def fit_sgd(self, X_train, y_train, n_iter=5, t0=5, t1=50):\n assert X_train.shape[0] == y_train.shape[0], ''\n\n def dJ_sgd(theta, X_b_i, y_i):\n return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2\n\n def sgd(X_b, y, initial_theta, n_iter, t0=5, t1=50):\n\n def learning_rate(t):\n return t0 / (t + t1)\n theta = initial_theta\n m = len(X_b)\n for curr_iter in range(n_iter):\n indexes = np.random.permutation(m)\n X_b_new = X_b[indexes]\n y_new = y[indexes]\n for i in range(m):\n gradient = dJ_sgd(theta, X_b_new[i], y_new[i])\n theta = theta - learning_rate(curr_iter * m + i) * gradient\n return theta\n X_b = np.hstack([np.ones([len(X_train), 1]), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = sgd(X_b, y_train, initial_theta, n_iter)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n\n def predict(self, X_predict):\n assert self.interception_ is not None and self.coef_ is not None, 'must fit before predict'\n assert X_predict.shape[1] == len(self.coef_\n ), 'the feature number of X_predict must be equal to X_train'\n X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])\n y_predict = X_b.dot(self._theta)\n return y_predict\n\n def score(self, X_test, y_test):\n y_predict = self.predict(X_test)\n return r2_score(y_test, y_predict)\n\n def __repr__(self):\n return 'LinearRegression()'\n",
"step-5": "import numpy as np\nfrom .metrics import r2_score\n\nclass LinearRegression:\n\n def __init__(self):\n self.coef_ = None # 系数\n self.interception_ = None # 截距\n self._theta = None\n\n def fit_normal(self, X_train, y_train):\n assert X_train.shape[0] == y_train.shape[0], \"\"\n\n #!!!important\n X_b = np.hstack([np.ones((len(X_train), 1)), X_train])\n self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n\n return self\n\n def fit_gd(self, X_train, y_train, eta=0.01, n_iter=1e4):\n\n assert X_train.shape[0] == y_train.shape[0], \"\"\n def J(theta, X_b, y):\n try:\n return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)\n except:\n return float('inf')\n\n def dJ(theta, X_b, y):\n # 向量化实现\n return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)\n\n def gradient_descent(X_b, y, initial_theta, eta, n_iter, epsilon=1e-8):\n theta = initial_theta\n i_iter = 0\n while i_iter < n_iter:\n gradient = dJ(theta, X_b, y)\n last_theta = theta\n theta = theta - eta * gradient\n if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):\n break\n i_iter += 1\n return theta\n\n X_b = np.hstack([np.ones((len(X_train), 1)), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iter)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n\n return self\n\n # n_iter 代表观测所有数据几次\n def fit_sgd(self, X_train, y_train, n_iter=5, t0=5, t1=50):\n\n assert X_train.shape[0] == y_train.shape[0], \"\"\n def dJ_sgd(theta, X_b_i, y_i):\n return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2\n\n # Stochastic gradient descent\n def sgd(X_b, y, initial_theta, n_iter, t0=5, t1=50):\n\n def learning_rate(t):\n return t0 / (t + t1)\n\n theta = initial_theta\n m = len(X_b)\n for curr_iter in range(n_iter):\n indexes = np.random.permutation(m)\n X_b_new = X_b[indexes]\n y_new = y[indexes]\n for i in range(m):\n gradient = dJ_sgd(theta, X_b_new[i], y_new[i])\n theta = theta - learning_rate(curr_iter * m + i) * gradient\n\n return theta\n\n X_b = np.hstack([np.ones([len(X_train), 1]), X_train])\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = sgd(X_b, y_train, initial_theta, n_iter)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n\n def predict(self,X_predict):\n assert self.interception_ is not None and self.coef_ is not None,\\\n \"must fit before predict\"\n assert X_predict.shape[1] == len(self.coef_),\\\n \"the feature number of X_predict must be equal to X_train\"\n X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])\n y_predict = X_b.dot(self._theta)\n return y_predict\n\n def score(self,X_test,y_test):\n y_predict = self.predict(X_test)\n return r2_score(y_test,y_predict)\n\n def __repr__(self):\n return \"LinearRegression()\"",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
import sys
sys.stdin = open('10989.txt', 'r')
counting_list = [0 for _ in range(10001)]
N = int(sys.stdin.readline())
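# counting sort: tally how many times each value 0..10000 appears, then print them in ascending order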
for n in range(N):
counting_list[int(sys.stdin.readline())] += 1
for i, v in enumerate(counting_list):
if v:
sys.stdout.write((str(i) + '\n') * v)
|
normal
|
{
"blob_id": "efca954e1977a6f6ac9a966b3c84ba80f5b7a663",
"index": 690,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor n in range(N):\n counting_list[int(sys.stdin.readline())] += 1\nfor i, v in enumerate(counting_list):\n if v:\n sys.stdout.write((str(i) + '\\n') * v)\n",
"step-3": "<mask token>\nsys.stdin = open('10989.txt', 'r')\ncounting_list = [(0) for _ in range(10001)]\nN = int(sys.stdin.readline())\nfor n in range(N):\n counting_list[int(sys.stdin.readline())] += 1\nfor i, v in enumerate(counting_list):\n if v:\n sys.stdout.write((str(i) + '\\n') * v)\n",
"step-4": "import sys\nsys.stdin = open('10989.txt', 'r')\ncounting_list = [(0) for _ in range(10001)]\nN = int(sys.stdin.readline())\nfor n in range(N):\n counting_list[int(sys.stdin.readline())] += 1\nfor i, v in enumerate(counting_list):\n if v:\n sys.stdout.write((str(i) + '\\n') * v)\n",
"step-5": "import sys\nsys.stdin = open('10989.txt', 'r')\n\ncounting_list = [0 for _ in range(10001)]\nN = int(sys.stdin.readline())\nfor n in range(N):\n counting_list[int(sys.stdin.readline())] += 1\n\nfor i, v in enumerate(counting_list):\n if v:\n sys.stdout.write((str(i) + '\\n') * v)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
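# BCM pin numbers: a power-control pin plus the bit-banged SPI lines used to talk to the MCP3008 ADC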
POWER_PIN = 21
SPICLK = 18
SPIMISO = 23
SPIMOSI = 24
SPICS = 25
PAUSE = 0.1
# read SPI data from MCP3008 chip, 8 possible adc's (0 thru 7)
def readadc(adcnum, clockpin, mosipin, misopin, cspin):
if ((adcnum > 7) or (adcnum < 0)):
return -1
GPIO.output(cspin, True)
GPIO.output(clockpin, False) # start clock low
GPIO.output(cspin, False) # bring CS low
commandout = adcnum
commandout |= 0x18 # start bit + single-ended bit
commandout <<= 3 # we only need to send 5 bits here
for i in range(5):
if (commandout & 0x80):
GPIO.output(mosipin, True)
else:
GPIO.output(mosipin, False)
commandout <<= 1
GPIO.output(clockpin, True)
GPIO.output(clockpin, False)
adcout = 0
# read in one empty bit, one null bit and 10 ADC bits
for i in range(12):
GPIO.output(clockpin, True)
GPIO.output(clockpin, False)
adcout <<= 1
if (GPIO.input(misopin)):
adcout |= 0x1
GPIO.output(cspin, True)
adcout >>= 1 # first bit is 'null' so drop it
return adcout
def spi_setup():
GPIO.setup(SPIMOSI, GPIO.OUT)
GPIO.setup(SPIMISO, GPIO.IN)
GPIO.setup(SPICLK, GPIO.OUT)
GPIO.setup(SPICS, GPIO.OUT)
GPIO.setup(POWER_PIN, GPIO.OUT)
def spi_readout(adc_pin):
# read the analog pin
return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)
def power_on():
GPIO.output(POWER_PIN, True)
def power_off():
GPIO.output(POWER_PIN, False)
def adc_to_temp(readout):
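    # scale the 10-bit reading to millivolts (3.3 V reference), then convert to degrees C via (mV - 500) / 10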
millivolts = readout * (3300.0 / 1024.0)
temp_c = ((millivolts - 100.0) / 10.0) - 40.0
return temp_c
if __name__ == "__main__":
HYGROMETER = 0
TEMP = 1
LIGHT = 2
spi_setup()
power_on()
time.sleep(PAUSE)
print("Hygrometer value %d" % spi_readout(HYGROMETER))
power_off()
time.sleep(PAUSE)
temp = adc_to_temp(spi_readout(TEMP))
print("Temp sensor: %.1f C" % temp)
time.sleep(PAUSE)
light_level = (float(spi_readout(LIGHT))/1024.0) * 100.0
print("Light level {}% ".format(light_level))
GPIO.cleanup()
|
normal
|
{
"blob_id": "fcdb43e36a4610ca0201a27d82b1a583f1482878",
"index": 8924,
"step-1": "<mask token>\n\n\ndef readadc(adcnum, clockpin, mosipin, misopin, cspin):\n if adcnum > 7 or adcnum < 0:\n return -1\n GPIO.output(cspin, True)\n GPIO.output(clockpin, False)\n GPIO.output(cspin, False)\n commandout = adcnum\n commandout |= 24\n commandout <<= 3\n for i in range(5):\n if commandout & 128:\n GPIO.output(mosipin, True)\n else:\n GPIO.output(mosipin, False)\n commandout <<= 1\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout = 0\n for i in range(12):\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout <<= 1\n if GPIO.input(misopin):\n adcout |= 1\n GPIO.output(cspin, True)\n adcout >>= 1\n return adcout\n\n\ndef spi_setup():\n GPIO.setup(SPIMOSI, GPIO.OUT)\n GPIO.setup(SPIMISO, GPIO.IN)\n GPIO.setup(SPICLK, GPIO.OUT)\n GPIO.setup(SPICS, GPIO.OUT)\n GPIO.setup(POWER_PIN, GPIO.OUT)\n\n\ndef spi_readout(adc_pin):\n return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)\n\n\n<mask token>\n\n\ndef adc_to_temp(readout):\n millivolts = readout * (3300.0 / 1024.0)\n temp_c = (millivolts - 100.0) / 10.0 - 40.0\n return temp_c\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef readadc(adcnum, clockpin, mosipin, misopin, cspin):\n if adcnum > 7 or adcnum < 0:\n return -1\n GPIO.output(cspin, True)\n GPIO.output(clockpin, False)\n GPIO.output(cspin, False)\n commandout = adcnum\n commandout |= 24\n commandout <<= 3\n for i in range(5):\n if commandout & 128:\n GPIO.output(mosipin, True)\n else:\n GPIO.output(mosipin, False)\n commandout <<= 1\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout = 0\n for i in range(12):\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout <<= 1\n if GPIO.input(misopin):\n adcout |= 1\n GPIO.output(cspin, True)\n adcout >>= 1\n return adcout\n\n\ndef spi_setup():\n GPIO.setup(SPIMOSI, GPIO.OUT)\n GPIO.setup(SPIMISO, GPIO.IN)\n GPIO.setup(SPICLK, GPIO.OUT)\n GPIO.setup(SPICS, GPIO.OUT)\n GPIO.setup(POWER_PIN, GPIO.OUT)\n\n\ndef spi_readout(adc_pin):\n return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)\n\n\n<mask token>\n\n\ndef power_off():\n GPIO.output(POWER_PIN, False)\n\n\ndef adc_to_temp(readout):\n millivolts = readout * (3300.0 / 1024.0)\n temp_c = (millivolts - 100.0) / 10.0 - 40.0\n return temp_c\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef readadc(adcnum, clockpin, mosipin, misopin, cspin):\n if adcnum > 7 or adcnum < 0:\n return -1\n GPIO.output(cspin, True)\n GPIO.output(clockpin, False)\n GPIO.output(cspin, False)\n commandout = adcnum\n commandout |= 24\n commandout <<= 3\n for i in range(5):\n if commandout & 128:\n GPIO.output(mosipin, True)\n else:\n GPIO.output(mosipin, False)\n commandout <<= 1\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout = 0\n for i in range(12):\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout <<= 1\n if GPIO.input(misopin):\n adcout |= 1\n GPIO.output(cspin, True)\n adcout >>= 1\n return adcout\n\n\ndef spi_setup():\n GPIO.setup(SPIMOSI, GPIO.OUT)\n GPIO.setup(SPIMISO, GPIO.IN)\n GPIO.setup(SPICLK, GPIO.OUT)\n GPIO.setup(SPICS, GPIO.OUT)\n GPIO.setup(POWER_PIN, GPIO.OUT)\n\n\ndef spi_readout(adc_pin):\n return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)\n\n\ndef power_on():\n GPIO.output(POWER_PIN, True)\n\n\ndef power_off():\n GPIO.output(POWER_PIN, False)\n\n\ndef adc_to_temp(readout):\n millivolts = readout * (3300.0 / 1024.0)\n temp_c = (millivolts - 100.0) / 10.0 - 40.0\n return temp_c\n\n\n<mask token>\n",
"step-4": "<mask token>\nGPIO.setmode(GPIO.BCM)\n<mask token>\n\n\ndef readadc(adcnum, clockpin, mosipin, misopin, cspin):\n if adcnum > 7 or adcnum < 0:\n return -1\n GPIO.output(cspin, True)\n GPIO.output(clockpin, False)\n GPIO.output(cspin, False)\n commandout = adcnum\n commandout |= 24\n commandout <<= 3\n for i in range(5):\n if commandout & 128:\n GPIO.output(mosipin, True)\n else:\n GPIO.output(mosipin, False)\n commandout <<= 1\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout = 0\n for i in range(12):\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout <<= 1\n if GPIO.input(misopin):\n adcout |= 1\n GPIO.output(cspin, True)\n adcout >>= 1\n return adcout\n\n\ndef spi_setup():\n GPIO.setup(SPIMOSI, GPIO.OUT)\n GPIO.setup(SPIMISO, GPIO.IN)\n GPIO.setup(SPICLK, GPIO.OUT)\n GPIO.setup(SPICS, GPIO.OUT)\n GPIO.setup(POWER_PIN, GPIO.OUT)\n\n\ndef spi_readout(adc_pin):\n return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)\n\n\ndef power_on():\n GPIO.output(POWER_PIN, True)\n\n\ndef power_off():\n GPIO.output(POWER_PIN, False)\n\n\ndef adc_to_temp(readout):\n millivolts = readout * (3300.0 / 1024.0)\n temp_c = (millivolts - 100.0) / 10.0 - 40.0\n return temp_c\n\n\nif __name__ == '__main__':\n HYGROMETER = 0\n TEMP = 1\n LIGHT = 2\n spi_setup()\n power_on()\n time.sleep(PAUSE)\n print('Hygrometer value %d' % spi_readout(HYGROMETER))\n power_off()\n time.sleep(PAUSE)\n temp = adc_to_temp(spi_readout(TEMP))\n print('Temp sensor: %.1f C' % temp)\n time.sleep(PAUSE)\n light_level = float(spi_readout(LIGHT)) / 1024.0 * 100.0\n print('Light level {}% '.format(light_level))\n GPIO.cleanup()\n",
"step-5": "import time\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BCM)\n\nPOWER_PIN = 21\nSPICLK = 18\nSPIMISO = 23\nSPIMOSI = 24\nSPICS = 25\n\nPAUSE = 0.1\n\n# read SPI data from MCP3008 chip, 8 possible adc's (0 thru 7)\ndef readadc(adcnum, clockpin, mosipin, misopin, cspin):\n if ((adcnum > 7) or (adcnum < 0)):\n return -1\n GPIO.output(cspin, True)\n\n GPIO.output(clockpin, False) # start clock low\n GPIO.output(cspin, False) # bring CS low\n\n commandout = adcnum\n commandout |= 0x18 # start bit + single-ended bit\n commandout <<= 3 # we only need to send 5 bits here\n for i in range(5):\n if (commandout & 0x80):\n GPIO.output(mosipin, True)\n else:\n GPIO.output(mosipin, False)\n commandout <<= 1\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n\n adcout = 0\n # read in one empty bit, one null bit and 10 ADC bits\n for i in range(12):\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout <<= 1\n if (GPIO.input(misopin)):\n adcout |= 0x1\n\n GPIO.output(cspin, True)\n\n adcout >>= 1 # first bit is 'null' so drop it\n return adcout\n\n\ndef spi_setup():\n GPIO.setup(SPIMOSI, GPIO.OUT)\n GPIO.setup(SPIMISO, GPIO.IN)\n GPIO.setup(SPICLK, GPIO.OUT)\n GPIO.setup(SPICS, GPIO.OUT)\n GPIO.setup(POWER_PIN, GPIO.OUT)\n\n\ndef spi_readout(adc_pin):\n # read the analog pin\n return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)\n\n\ndef power_on():\n\n GPIO.output(POWER_PIN, True)\n\n\ndef power_off():\n GPIO.output(POWER_PIN, False)\n\n\ndef adc_to_temp(readout):\n millivolts = readout * (3300.0 / 1024.0)\n temp_c = ((millivolts - 100.0) / 10.0) - 40.0\n return temp_c\n\nif __name__ == \"__main__\":\n HYGROMETER = 0\n TEMP = 1\n LIGHT = 2\n spi_setup()\n power_on()\n time.sleep(PAUSE)\n print(\"Hygrometer value %d\" % spi_readout(HYGROMETER))\n power_off()\n time.sleep(PAUSE)\n temp = adc_to_temp(spi_readout(TEMP))\n print(\"Temp sensor: %.1f C\" % temp)\n time.sleep(PAUSE)\n light_level = (float(spi_readout(LIGHT))/1024.0) * 100.0\n print(\"Light level {}% \".format(light_level))\n GPIO.cleanup()\n",
"step-ids": [
4,
5,
6,
7,
10
]
}
|
[
4,
5,
6,
7,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cmdline.execute('scrapy crawl rapo.com'.split())
<|reserved_special_token_1|>
from scrapy import cmdline
cmdline.execute('scrapy crawl rapo.com'.split())
<|reserved_special_token_1|>
from scrapy import cmdline
cmdline.execute("scrapy crawl rapo.com".split())
|
flexible
|
{
"blob_id": "326f1b5bee8f488382a76fcc5559f4ea13734f21",
"index": 6551,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncmdline.execute('scrapy crawl rapo.com'.split())\n",
"step-3": "from scrapy import cmdline\ncmdline.execute('scrapy crawl rapo.com'.split())\n",
"step-4": "from scrapy import cmdline\ncmdline.execute(\"scrapy crawl rapo.com\".split())\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import serial
from settings import *
class CommunicationController:
def __init__(self):
global board
board = serial.Serial(ROBOT_SERIAL, BAUDRATE, serial.EIGHTBITS, timeout=0)
self.count = 0
print("Communication controller")
def sendCommand(self, right, back, left):
self.count += 1
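        # flush the serial buffers every BUFFER_RESET_BOUND commands so they do not keep growing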
if self.count >= BUFFER_RESET_BOUND:
board.reset_output_buffer()
board.reset_input_buffer()
self.count = 0
#format:
#sd:BACKWHEEL:RIGHTWHEEL:LEFTWHEEL\n
command = ":".join(("sd", str(right), str(left), str(back) ))
if board.is_open:
board.write(command + '\n')
# print(command)
def throwBall(self, value):
if board.is_open:
command = ":".join(("d",str(value)))
print(command)
board.write(command + '\r\n')
print("Throw")
else:
print("No board")
|
normal
|
{
"blob_id": "48291ab3deb1ca1ba672d3e642d55635a7270171",
"index": 955,
"step-1": "<mask token>\n\n\nclass CommunicationController:\n <mask token>\n\n def sendCommand(self, right, back, left):\n self.count += 1\n if self.count >= BUFFER_RESET_BOUND:\n board.reset_output_buffer()\n board.reset_input_buffer()\n self.count = 0\n command = ':'.join(('sd', str(right), str(left), str(back)))\n if board.is_open:\n board.write(command + '\\n')\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CommunicationController:\n\n def __init__(self):\n global board\n board = serial.Serial(ROBOT_SERIAL, BAUDRATE, serial.EIGHTBITS,\n timeout=0)\n self.count = 0\n print('Communication controller')\n\n def sendCommand(self, right, back, left):\n self.count += 1\n if self.count >= BUFFER_RESET_BOUND:\n board.reset_output_buffer()\n board.reset_input_buffer()\n self.count = 0\n command = ':'.join(('sd', str(right), str(left), str(back)))\n if board.is_open:\n board.write(command + '\\n')\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CommunicationController:\n\n def __init__(self):\n global board\n board = serial.Serial(ROBOT_SERIAL, BAUDRATE, serial.EIGHTBITS,\n timeout=0)\n self.count = 0\n print('Communication controller')\n\n def sendCommand(self, right, back, left):\n self.count += 1\n if self.count >= BUFFER_RESET_BOUND:\n board.reset_output_buffer()\n board.reset_input_buffer()\n self.count = 0\n command = ':'.join(('sd', str(right), str(left), str(back)))\n if board.is_open:\n board.write(command + '\\n')\n\n def throwBall(self, value):\n if board.is_open:\n command = ':'.join(('d', str(value)))\n print(command)\n board.write(command + '\\r\\n')\n print('Throw')\n else:\n print('No board')\n",
"step-4": "import serial\nfrom settings import *\n\n\nclass CommunicationController:\n\n def __init__(self):\n global board\n board = serial.Serial(ROBOT_SERIAL, BAUDRATE, serial.EIGHTBITS,\n timeout=0)\n self.count = 0\n print('Communication controller')\n\n def sendCommand(self, right, back, left):\n self.count += 1\n if self.count >= BUFFER_RESET_BOUND:\n board.reset_output_buffer()\n board.reset_input_buffer()\n self.count = 0\n command = ':'.join(('sd', str(right), str(left), str(back)))\n if board.is_open:\n board.write(command + '\\n')\n\n def throwBall(self, value):\n if board.is_open:\n command = ':'.join(('d', str(value)))\n print(command)\n board.write(command + '\\r\\n')\n print('Throw')\n else:\n print('No board')\n",
"step-5": "import serial\nfrom settings import *\nclass CommunicationController:\n def __init__(self):\n global board\n board = serial.Serial(ROBOT_SERIAL, BAUDRATE, serial.EIGHTBITS, timeout=0)\n self.count = 0\n print(\"Communication controller\")\n\n def sendCommand(self, right, back, left):\n self.count += 1\n if self.count >= BUFFER_RESET_BOUND:\n board.reset_output_buffer()\n board.reset_input_buffer()\n self.count = 0\n #format:\n #sd:BACKWHEEL:RIGHTWHEEL:LEFTWHEEL\\n\n command = \":\".join((\"sd\", str(right), str(left), str(back) ))\n if board.is_open:\n board.write(command + '\\n')\n # print(command)\n\n def throwBall(self, value):\n if board.is_open:\n command = \":\".join((\"d\",str(value)))\n print(command)\n board.write(command + '\\r\\n')\n print(\"Throw\")\n else:\n print(\"No board\")\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import os
import requests
from PIL import Image
from io import BytesIO
import csv
from typing import Iterable, List, Tuple, Dict, Callable, Union, Collection
# pull the image from the api endpoint and save it if we don't have it, else load it from disk
def get_img_from_file_or_url(img_format: str = 'JPEG') -> Callable[[str, str], Image.Image]:
def _apply(filepath: str, url: str) -> Image.Image:
img = from_file(filepath)
if img is None:
img = from_url(url)
img.save(filepath, img_format)
return img.convert('RGB') # convert to rgb if not already (eg if grayscale)
return _apply
def from_url(url: str) -> Image.Image:
api_response = requests.get(url).content
response_bytes = BytesIO(api_response)
return Image.open(response_bytes)
def from_file(path: str) -> Union[Image.Image, None]:
if os.path.exists(path):
return Image.open(path)
else:
return None
def load_metadata(path: str, cols: Iterable[int], class_cols: Collection[int] = tuple(), valid_only: bool = True, **reader_args)\
-> Tuple[List, int, List, List[Dict[str, int]], List[Dict[int, str]], int]:
metadata = []
    # one independent dict for each class col (list comprehensions avoid shared references)
    class_to_index: List[Dict[str, int]] = [{} for _ in class_cols]
    index_to_class: List[Dict[int, str]] = [{} for _ in class_cols]
next_indices = [0] * len(class_cols) # next index for a new class value
with open(path, 'r', newline='', encoding="utf8") as metadata_file:
reader = csv.reader(metadata_file, **reader_args)
headers = next(reader)
for row in reader:
if len(row) != 0:
metadatum = [row[c] for c in cols]
# for all class cols, add their vals to the class_to_index and index_to_class dicts if not there already
for c, class_col in enumerate(class_cols):
if not row[class_col] in class_to_index[c]:
class_to_index[c][row[class_col]] = next_indices[c]
index_to_class[c][next_indices[c]] = row[class_col]
next_indices[c] += 1
if valid_only and '' in metadatum:
continue
metadata.append(metadatum)
len_metadata = len(metadata)
num_classes = 0 if len(next_indices) == 0 else next_indices[-1]
# split off the headers
return metadata, len_metadata, headers, class_to_index, index_to_class, num_classes
|
normal
|
{
"blob_id": "f2bb44600f011a205c71985ad94c18f7e058634f",
"index": 8,
"step-1": "<mask token>\n\n\ndef from_url(url: str) ->Image.Image:\n api_response = requests.get(url).content\n response_bytes = BytesIO(api_response)\n return Image.open(response_bytes)\n\n\ndef from_file(path: str) ->Union[Image.Image, None]:\n if os.path.exists(path):\n return Image.open(path)\n else:\n return None\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_img_from_file_or_url(img_format: str='JPEG') ->Callable[[str, str],\n Image.Image]:\n\n def _apply(filepath: str, url: str) ->Image.Image:\n img = from_file(filepath)\n if img is None:\n img = from_url(url)\n img.save(filepath, img_format)\n return img.convert('RGB')\n return _apply\n\n\ndef from_url(url: str) ->Image.Image:\n api_response = requests.get(url).content\n response_bytes = BytesIO(api_response)\n return Image.open(response_bytes)\n\n\ndef from_file(path: str) ->Union[Image.Image, None]:\n if os.path.exists(path):\n return Image.open(path)\n else:\n return None\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_img_from_file_or_url(img_format: str='JPEG') ->Callable[[str, str],\n Image.Image]:\n\n def _apply(filepath: str, url: str) ->Image.Image:\n img = from_file(filepath)\n if img is None:\n img = from_url(url)\n img.save(filepath, img_format)\n return img.convert('RGB')\n return _apply\n\n\ndef from_url(url: str) ->Image.Image:\n api_response = requests.get(url).content\n response_bytes = BytesIO(api_response)\n return Image.open(response_bytes)\n\n\ndef from_file(path: str) ->Union[Image.Image, None]:\n if os.path.exists(path):\n return Image.open(path)\n else:\n return None\n\n\ndef load_metadata(path: str, cols: Iterable[int], class_cols: Collection[\n int]=tuple(), valid_only: bool=True, **reader_args) ->Tuple[List, int,\n List, List[Dict[str, int]], List[Dict[int, str]], int]:\n metadata = []\n class_to_index: List[Dict[str, int]] = [{}] * len(class_cols)\n index_to_class: List[Dict[int, str]] = [{}] * len(class_cols)\n next_indices = [0] * len(class_cols)\n with open(path, 'r', newline='', encoding='utf8') as metadata_file:\n reader = csv.reader(metadata_file, **reader_args)\n headers = next(reader)\n for row in reader:\n if len(row) != 0:\n metadatum = [row[c] for c in cols]\n for c, class_col in enumerate(class_cols):\n if not row[class_col] in class_to_index[c]:\n class_to_index[c][row[class_col]] = next_indices[c]\n index_to_class[c][next_indices[c]] = row[class_col]\n next_indices[c] += 1\n if valid_only and '' in metadatum:\n continue\n metadata.append(metadatum)\n len_metadata = len(metadata)\n num_classes = 0 if len(next_indices) == 0 else next_indices[-1]\n return (metadata, len_metadata, headers, class_to_index, index_to_class,\n num_classes)\n",
"step-4": "import os\nimport requests\nfrom PIL import Image\nfrom io import BytesIO\nimport csv\nfrom typing import Iterable, List, Tuple, Dict, Callable, Union, Collection\n\n\ndef get_img_from_file_or_url(img_format: str='JPEG') ->Callable[[str, str],\n Image.Image]:\n\n def _apply(filepath: str, url: str) ->Image.Image:\n img = from_file(filepath)\n if img is None:\n img = from_url(url)\n img.save(filepath, img_format)\n return img.convert('RGB')\n return _apply\n\n\ndef from_url(url: str) ->Image.Image:\n api_response = requests.get(url).content\n response_bytes = BytesIO(api_response)\n return Image.open(response_bytes)\n\n\ndef from_file(path: str) ->Union[Image.Image, None]:\n if os.path.exists(path):\n return Image.open(path)\n else:\n return None\n\n\ndef load_metadata(path: str, cols: Iterable[int], class_cols: Collection[\n int]=tuple(), valid_only: bool=True, **reader_args) ->Tuple[List, int,\n List, List[Dict[str, int]], List[Dict[int, str]], int]:\n metadata = []\n class_to_index: List[Dict[str, int]] = [{}] * len(class_cols)\n index_to_class: List[Dict[int, str]] = [{}] * len(class_cols)\n next_indices = [0] * len(class_cols)\n with open(path, 'r', newline='', encoding='utf8') as metadata_file:\n reader = csv.reader(metadata_file, **reader_args)\n headers = next(reader)\n for row in reader:\n if len(row) != 0:\n metadatum = [row[c] for c in cols]\n for c, class_col in enumerate(class_cols):\n if not row[class_col] in class_to_index[c]:\n class_to_index[c][row[class_col]] = next_indices[c]\n index_to_class[c][next_indices[c]] = row[class_col]\n next_indices[c] += 1\n if valid_only and '' in metadatum:\n continue\n metadata.append(metadatum)\n len_metadata = len(metadata)\n num_classes = 0 if len(next_indices) == 0 else next_indices[-1]\n return (metadata, len_metadata, headers, class_to_index, index_to_class,\n num_classes)\n",
"step-5": "import os\nimport requests\nfrom PIL import Image\nfrom io import BytesIO\nimport csv\nfrom typing import Iterable, List, Tuple, Dict, Callable, Union, Collection\n\n\n# pull the image from the api endpoint and save it if we don't have it, else load it from disk\ndef get_img_from_file_or_url(img_format: str = 'JPEG') -> Callable[[str, str], Image.Image]:\n def _apply(filepath: str, url: str) -> Image.Image:\n img = from_file(filepath)\n if img is None:\n img = from_url(url)\n img.save(filepath, img_format)\n return img.convert('RGB') # convert to rgb if not already (eg if grayscale)\n return _apply\n\n\ndef from_url(url: str) -> Image.Image:\n api_response = requests.get(url).content\n response_bytes = BytesIO(api_response)\n return Image.open(response_bytes)\n\n\ndef from_file(path: str) -> Union[Image.Image, None]:\n if os.path.exists(path):\n return Image.open(path)\n else:\n return None\n\n\ndef load_metadata(path: str, cols: Iterable[int], class_cols: Collection[int] = tuple(), valid_only: bool = True, **reader_args)\\\n -> Tuple[List, int, List, List[Dict[str, int]], List[Dict[int, str]], int]:\n metadata = []\n # one dict for each class col\n class_to_index: List[Dict[str, int]] = [{}] * len(class_cols)\n index_to_class: List[Dict[int, str]] = [{}] * len(class_cols)\n next_indices = [0] * len(class_cols) # next index for a new class value\n with open(path, 'r', newline='', encoding=\"utf8\") as metadata_file:\n reader = csv.reader(metadata_file, **reader_args)\n headers = next(reader)\n for row in reader:\n if len(row) != 0:\n metadatum = [row[c] for c in cols]\n # for all class cols, add their vals to the class_to_index and index_to_class dicts if not there already\n for c, class_col in enumerate(class_cols):\n if not row[class_col] in class_to_index[c]:\n class_to_index[c][row[class_col]] = next_indices[c]\n index_to_class[c][next_indices[c]] = row[class_col]\n next_indices[c] += 1\n if valid_only and '' in metadatum:\n continue\n metadata.append(metadatum)\n len_metadata = len(metadata)\n num_classes = 0 if len(next_indices) == 0 else next_indices[-1]\n # split off the headers\n return metadata, len_metadata, headers, class_to_index, index_to_class, num_classes\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# class header
class _WATERWAYS():
def __init__(self,):
self.name = "WATERWAYS"
self.definitions = waterway
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['waterway']
|
normal
|
{
"blob_id": "33daf5753b27f6b4bcb7c98e28cf2168e7f0b403",
"index": 9541,
"step-1": "<mask token>\n",
"step-2": "class _WATERWAYS:\n <mask token>\n",
"step-3": "class _WATERWAYS:\n\n def __init__(self):\n self.name = 'WATERWAYS'\n self.definitions = waterway\n self.parents = []\n self.childen = []\n self.properties = []\n self.jsondata = {}\n self.basic = ['waterway']\n",
"step-4": "\n\n#calss header\nclass _WATERWAYS():\n\tdef __init__(self,): \n\t\tself.name = \"WATERWAYS\"\n\t\tself.definitions = waterway\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.basic = ['waterway']\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Cell:
<|reserved_special_token_0|>
def reveal(self):
if not self.game.is_game_over:
self.revelada = True
if self.bombs_around == 0:
self.flood()
if self.bomba:
self.game.is_game_over = True
self.game.score = 0
EFFECT.play()
def check_neighbours(self, grid):
"""
        This function will count how many bombs there are around a particular cell
"""
if self.bomba:
self.bombs_around = -1
return
total = 0
for x in range(-1, 2):
for y in range(-1, 2):
i = self.i + x
j = self.j + y
if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):
neighbor = grid[i][j]
if neighbor.bomba:
total += 1
self.bombs_around = total
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def draw_number(self):
"""
This function will draw the numbers according to the total of bombs around the cell.
Also it will give colors to some numbers
"""
text_color = 0, 0, 0
if self.bombs_around == 1:
text_color = 0, 0, 150
if self.bombs_around == 2:
text_color = 0, 150, 0
if self.bombs_around == 3:
text_color = 150, 0, 0
if self.bombs_around == 4:
text_color = 133, 39, 138
if self.bombs_around == 5:
text_color = 128, 0, 0
if self.bombs_around == 6:
text_color = 175, 238, 238
if self.bombs_around == 7:
text_color = 0, 0, 0
if self.bombs_around == 8:
text_color = 33, 161, 166
font = pygame.font.Font('fonts/JetBrainsMono-Bold.ttf', 24)
if self.bombs_around > 0 and self.revelada:
text = font.render(str(self.bombs_around), False, text_color)
self.game.screen.blit(text, (self.x + 12, self.y))
<|reserved_special_token_0|>
def draw_cell(self):
pygame.draw.rect(self.game.screen, WHITE, (self.x, self.y, TILESIZE -
1, TILESIZE - 1))
if self.revelada:
if self.bomba:
pygame.draw.rect(self.game.screen, RED, (self.x + 10, self.
y + 10, TILESIZE - 23, TILESIZE - 23))
else:
pygame.draw.rect(self.game.screen, GRAY, (self.x, self.y,
TILESIZE - 1, TILESIZE - 1))
if self.flag_enabled and not self.revelada:
self.game.flag.draw(self.game.screen, self.x + 10, self.y + 10)
def get_mouse_pos(self):
mouse = pygame.mouse.get_pos()
return [mouse[0] // TILESIZE, mouse[1] // TILESIZE]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Cell:
def __init__(self, game, x, y, bombs):
self.game = game
self.x = x
self.y = y
self.i = x // TILESIZE
self.j = y // TILESIZE
self.revelada = False
self.bomba = False
self.bombas_total = bombs
self.bombs_around = 0
self.flag_enabled = False
def reveal(self):
if not self.game.is_game_over:
self.revelada = True
if self.bombs_around == 0:
self.flood()
if self.bomba:
self.game.is_game_over = True
self.game.score = 0
EFFECT.play()
def check_neighbours(self, grid):
"""
        This function will count how many bombs there are around a particular cell
"""
if self.bomba:
self.bombs_around = -1
return
total = 0
for x in range(-1, 2):
for y in range(-1, 2):
i = self.i + x
j = self.j + y
if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):
neighbor = grid[i][j]
if neighbor.bomba:
total += 1
self.bombs_around = total
def flood(self):
for x in range(-1, 2):
for y in range(-1, 2):
i = self.i + x
j = self.j + y
if i > -1 and i < len(self.game.grid) and j > -1 and j < len(
self.game.grid[1]):
neighbor = self.game.grid[i][j]
if (not neighbor.revelada and not neighbor.flag_enabled and
not self.game.is_game_over):
neighbor.reveal()
def enable_flag(self):
self.flag_enabled = not self.flag_enabled
if self.bomba:
self.game.score += 1
def draw_number(self):
"""
This function will draw the numbers according to the total of bombs around the cell.
Also it will give colors to some numbers
"""
text_color = 0, 0, 0
if self.bombs_around == 1:
text_color = 0, 0, 150
if self.bombs_around == 2:
text_color = 0, 150, 0
if self.bombs_around == 3:
text_color = 150, 0, 0
if self.bombs_around == 4:
text_color = 133, 39, 138
if self.bombs_around == 5:
text_color = 128, 0, 0
if self.bombs_around == 6:
text_color = 175, 238, 238
if self.bombs_around == 7:
text_color = 0, 0, 0
if self.bombs_around == 8:
text_color = 33, 161, 166
font = pygame.font.Font('fonts/JetBrainsMono-Bold.ttf', 24)
if self.bombs_around > 0 and self.revelada:
text = font.render(str(self.bombs_around), False, text_color)
self.game.screen.blit(text, (self.x + 12, self.y))
<|reserved_special_token_0|>
def draw_cell(self):
pygame.draw.rect(self.game.screen, WHITE, (self.x, self.y, TILESIZE -
1, TILESIZE - 1))
if self.revelada:
if self.bomba:
pygame.draw.rect(self.game.screen, RED, (self.x + 10, self.
y + 10, TILESIZE - 23, TILESIZE - 23))
else:
pygame.draw.rect(self.game.screen, GRAY, (self.x, self.y,
TILESIZE - 1, TILESIZE - 1))
if self.flag_enabled and not self.revelada:
self.game.flag.draw(self.game.screen, self.x + 10, self.y + 10)
def get_mouse_pos(self):
mouse = pygame.mouse.get_pos()
return [mouse[0] // TILESIZE, mouse[1] // TILESIZE]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Cell:
def __init__(self, game, x, y, bombs):
self.game = game
self.x = x
self.y = y
self.i = x // TILESIZE
self.j = y // TILESIZE
self.revelada = False
self.bomba = False
self.bombas_total = bombs
self.bombs_around = 0
self.flag_enabled = False
def reveal(self):
if not self.game.is_game_over:
self.revelada = True
if self.bombs_around == 0:
self.flood()
if self.bomba:
self.game.is_game_over = True
self.game.score = 0
EFFECT.play()
def check_neighbours(self, grid):
"""
        This function will count how many bombs there are around a particular cell
"""
if self.bomba:
self.bombs_around = -1
return
total = 0
for x in range(-1, 2):
for y in range(-1, 2):
i = self.i + x
j = self.j + y
if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):
neighbor = grid[i][j]
if neighbor.bomba:
total += 1
self.bombs_around = total
def flood(self):
for x in range(-1, 2):
for y in range(-1, 2):
i = self.i + x
j = self.j + y
if i > -1 and i < len(self.game.grid) and j > -1 and j < len(
self.game.grid[1]):
neighbor = self.game.grid[i][j]
if (not neighbor.revelada and not neighbor.flag_enabled and
not self.game.is_game_over):
neighbor.reveal()
def enable_flag(self):
self.flag_enabled = not self.flag_enabled
if self.bomba:
self.game.score += 1
def draw_number(self):
"""
This function will draw the numbers according to the total of bombs around the cell.
Also it will give colors to some numbers
"""
text_color = 0, 0, 0
if self.bombs_around == 1:
text_color = 0, 0, 150
if self.bombs_around == 2:
text_color = 0, 150, 0
if self.bombs_around == 3:
text_color = 150, 0, 0
if self.bombs_around == 4:
text_color = 133, 39, 138
if self.bombs_around == 5:
text_color = 128, 0, 0
if self.bombs_around == 6:
text_color = 175, 238, 238
if self.bombs_around == 7:
text_color = 0, 0, 0
if self.bombs_around == 8:
text_color = 33, 161, 166
font = pygame.font.Font('fonts/JetBrainsMono-Bold.ttf', 24)
if self.bombs_around > 0 and self.revelada:
text = font.render(str(self.bombs_around), False, text_color)
self.game.screen.blit(text, (self.x + 12, self.y))
def set_bomb(self):
"""
This function will turn this cell into a cell with a bomb
(just to keep organized)
"""
self.bomba = True
def draw_cell(self):
pygame.draw.rect(self.game.screen, WHITE, (self.x, self.y, TILESIZE -
1, TILESIZE - 1))
if self.revelada:
if self.bomba:
pygame.draw.rect(self.game.screen, RED, (self.x + 10, self.
y + 10, TILESIZE - 23, TILESIZE - 23))
else:
pygame.draw.rect(self.game.screen, GRAY, (self.x, self.y,
TILESIZE - 1, TILESIZE - 1))
if self.flag_enabled and not self.revelada:
self.game.flag.draw(self.game.screen, self.x + 10, self.y + 10)
def get_mouse_pos(self):
mouse = pygame.mouse.get_pos()
return [mouse[0] // TILESIZE, mouse[1] // TILESIZE]
<|reserved_special_token_1|>
import pygame
from settings import *
import random
class Cell:
def __init__(self, game, x, y, bombs):
self.game = game
self.x = x
self.y = y
self.i = x // TILESIZE
self.j = y // TILESIZE
self.revelada = False
self.bomba = False
self.bombas_total = bombs
self.bombs_around = 0
self.flag_enabled = False
def reveal(self):
if not self.game.is_game_over:
self.revelada = True
if self.bombs_around == 0:
self.flood()
if self.bomba:
self.game.is_game_over = True
self.game.score = 0
EFFECT.play()
def check_neighbours(self, grid):
"""
        This function will count how many bombs there are around a particular cell
"""
if self.bomba:
self.bombs_around = -1
return
total = 0
for x in range(-1, 2):
for y in range(-1, 2):
i = self.i + x
j = self.j + y
if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):
neighbor = grid[i][j]
if neighbor.bomba:
total += 1
self.bombs_around = total
def flood(self):
for x in range(-1, 2):
for y in range(-1, 2):
i = self.i + x
j = self.j + y
if i > -1 and i < len(self.game.grid) and j > -1 and j < len(
self.game.grid[1]):
neighbor = self.game.grid[i][j]
if (not neighbor.revelada and not neighbor.flag_enabled and
not self.game.is_game_over):
neighbor.reveal()
def enable_flag(self):
self.flag_enabled = not self.flag_enabled
if self.bomba:
self.game.score += 1
def draw_number(self):
"""
This function will draw the numbers according to the total of bombs around the cell.
Also it will give colors to some numbers
"""
text_color = 0, 0, 0
if self.bombs_around == 1:
text_color = 0, 0, 150
if self.bombs_around == 2:
text_color = 0, 150, 0
if self.bombs_around == 3:
text_color = 150, 0, 0
if self.bombs_around == 4:
text_color = 133, 39, 138
if self.bombs_around == 5:
text_color = 128, 0, 0
if self.bombs_around == 6:
text_color = 175, 238, 238
if self.bombs_around == 7:
text_color = 0, 0, 0
if self.bombs_around == 8:
text_color = 33, 161, 166
font = pygame.font.Font('fonts/JetBrainsMono-Bold.ttf', 24)
if self.bombs_around > 0 and self.revelada:
text = font.render(str(self.bombs_around), False, text_color)
self.game.screen.blit(text, (self.x + 12, self.y))
def set_bomb(self):
"""
This function will turn this cell into a cell with a bomb
(just to keep organized)
"""
self.bomba = True
def draw_cell(self):
pygame.draw.rect(self.game.screen, WHITE, (self.x, self.y, TILESIZE -
1, TILESIZE - 1))
if self.revelada:
if self.bomba:
pygame.draw.rect(self.game.screen, RED, (self.x + 10, self.
y + 10, TILESIZE - 23, TILESIZE - 23))
else:
pygame.draw.rect(self.game.screen, GRAY, (self.x, self.y,
TILESIZE - 1, TILESIZE - 1))
if self.flag_enabled and not self.revelada:
self.game.flag.draw(self.game.screen, self.x + 10, self.y + 10)
def get_mouse_pos(self):
mouse = pygame.mouse.get_pos()
return [mouse[0] // TILESIZE, mouse[1] // TILESIZE]
<|reserved_special_token_1|>
import pygame
from settings import *
import random
class Cell:
def __init__(self, game, x, y, bombs):
self.game = game
self.x = x
self.y = y
self.i = x // TILESIZE
self.j = y // TILESIZE
self.revelada = False
self.bomba = False
self.bombas_total = bombs
self.bombs_around = 0
self.flag_enabled = False
def reveal(self):
if not self.game.is_game_over:
self.revelada = True
if self.bombs_around == 0:
self.flood()
if self.bomba:
self.game.is_game_over = True
self.game.score = 0
EFFECT.play()
def check_neighbours(self, grid):
"""
        This function will count how many bombs there are around a particular cell
"""
if self.bomba:
self.bombs_around = -1
return
total = 0
for x in range(-1, 2):
for y in range(-1, 2):
i = self.i + x
j = self.j + y
if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):
neighbor = grid[i][j]
if neighbor.bomba:
total += 1
self.bombs_around = total
def flood(self):
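        # flood fill: reveal every unflagged, unrevealed neighbour; reveal() recurses further for empty cells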
for x in range(-1, 2):
for y in range(-1, 2):
i = self.i + x
j = self.j + y
if i > -1 and i < len(self.game.grid) and j > -1 and j < len(self.game.grid[1]):
neighbor = self.game.grid[i][j]
if not neighbor.revelada and not neighbor.flag_enabled and not self.game.is_game_over:
neighbor.reveal()
def enable_flag(self):
self.flag_enabled = not self.flag_enabled
if self.bomba: # TODO: and self.flag_enabled
self.game.score += 1
# TODO: else: self.game.score -= 1
# all the spots revealed shouldn't be a bomb
def draw_number(self):
"""
This function will draw the numbers according to the total of bombs around the cell.
Also it will give colors to some numbers
"""
text_color = (0, 0, 0)
if self.bombs_around == 1:
text_color = (0, 0, 150)
if self.bombs_around == 2:
text_color = (0, 150, 0)
if self.bombs_around == 3:
text_color = (150, 0, 0)
if self.bombs_around == 4:
text_color = (133, 39, 138)
if self.bombs_around == 5:
text_color = (128, 0, 0)
if self.bombs_around == 6:
text_color = (175, 238, 238)
if self.bombs_around == 7:
text_color = (0, 0, 0)
if self.bombs_around == 8:
text_color = (33, 161, 166)
font = pygame.font.Font("fonts/JetBrainsMono-Bold.ttf", 24)
if self.bombs_around > 0 and self.revelada:
text = font.render(
str(self.bombs_around), False, text_color)
self.game.screen.blit(text, (self.x + 12, self.y))
def set_bomb(self):
"""
This function will turn this cell into a cell with a bomb
(just to keep organized)
"""
self.bomba = True
def draw_cell(self):
pygame.draw.rect(
self.game.screen, WHITE, (self.x, self.y, TILESIZE - 1, TILESIZE - 1))
if self.revelada:
if self.bomba:
pygame.draw.rect(
self.game.screen, RED, (self.x + 10, self.y + 10, TILESIZE - 23, TILESIZE - 23))
else:
pygame.draw.rect(
self.game.screen, GRAY, (self.x, self.y, TILESIZE - 1, TILESIZE - 1))
if self.flag_enabled and not self.revelada:
self.game.flag.draw(self.game.screen, self.x + 10, self.y + 10)
def get_mouse_pos(self):
mouse = pygame.mouse.get_pos()
return [mouse[0] // TILESIZE, mouse[1] // TILESIZE]
|
flexible
|
{
"blob_id": "e31f1e24c319f338d728661dfd50e758526112d6",
"index": 7796,
"step-1": "<mask token>\n\n\nclass Cell:\n <mask token>\n\n def reveal(self):\n if not self.game.is_game_over:\n self.revelada = True\n if self.bombs_around == 0:\n self.flood()\n if self.bomba:\n self.game.is_game_over = True\n self.game.score = 0\n EFFECT.play()\n\n def check_neighbours(self, grid):\n \"\"\"\n This function will count how many bombs there is around a particular cell\n \"\"\"\n if self.bomba:\n self.bombs_around = -1\n return\n total = 0\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):\n neighbor = grid[i][j]\n if neighbor.bomba:\n total += 1\n self.bombs_around = total\n <mask token>\n <mask token>\n\n def draw_number(self):\n \"\"\"\n This function will draw the numbers according to the total of bombs around the cell.\n Also it will give colors to some numbers\n \"\"\"\n text_color = 0, 0, 0\n if self.bombs_around == 1:\n text_color = 0, 0, 150\n if self.bombs_around == 2:\n text_color = 0, 150, 0\n if self.bombs_around == 3:\n text_color = 150, 0, 0\n if self.bombs_around == 4:\n text_color = 133, 39, 138\n if self.bombs_around == 5:\n text_color = 128, 0, 0\n if self.bombs_around == 6:\n text_color = 175, 238, 238\n if self.bombs_around == 7:\n text_color = 0, 0, 0\n if self.bombs_around == 8:\n text_color = 33, 161, 166\n font = pygame.font.Font('fonts/JetBrainsMono-Bold.ttf', 24)\n if self.bombs_around > 0 and self.revelada:\n text = font.render(str(self.bombs_around), False, text_color)\n self.game.screen.blit(text, (self.x + 12, self.y))\n <mask token>\n\n def draw_cell(self):\n pygame.draw.rect(self.game.screen, WHITE, (self.x, self.y, TILESIZE -\n 1, TILESIZE - 1))\n if self.revelada:\n if self.bomba:\n pygame.draw.rect(self.game.screen, RED, (self.x + 10, self.\n y + 10, TILESIZE - 23, TILESIZE - 23))\n else:\n pygame.draw.rect(self.game.screen, GRAY, (self.x, self.y, \n TILESIZE - 1, TILESIZE - 1))\n if self.flag_enabled and not self.revelada:\n self.game.flag.draw(self.game.screen, self.x + 10, self.y + 10)\n\n def get_mouse_pos(self):\n mouse = pygame.mouse.get_pos()\n return [mouse[0] // TILESIZE, mouse[1] // TILESIZE]\n",
"step-2": "<mask token>\n\n\nclass Cell:\n\n def __init__(self, game, x, y, bombs):\n self.game = game\n self.x = x\n self.y = y\n self.i = x // TILESIZE\n self.j = y // TILESIZE\n self.revelada = False\n self.bomba = False\n self.bombas_total = bombs\n self.bombs_around = 0\n self.flag_enabled = False\n\n def reveal(self):\n if not self.game.is_game_over:\n self.revelada = True\n if self.bombs_around == 0:\n self.flood()\n if self.bomba:\n self.game.is_game_over = True\n self.game.score = 0\n EFFECT.play()\n\n def check_neighbours(self, grid):\n \"\"\"\n This function will count how many bombs there is around a particular cell\n \"\"\"\n if self.bomba:\n self.bombs_around = -1\n return\n total = 0\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):\n neighbor = grid[i][j]\n if neighbor.bomba:\n total += 1\n self.bombs_around = total\n\n def flood(self):\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(self.game.grid) and j > -1 and j < len(\n self.game.grid[1]):\n neighbor = self.game.grid[i][j]\n if (not neighbor.revelada and not neighbor.flag_enabled and\n not self.game.is_game_over):\n neighbor.reveal()\n\n def enable_flag(self):\n self.flag_enabled = not self.flag_enabled\n if self.bomba:\n self.game.score += 1\n\n def draw_number(self):\n \"\"\"\n This function will draw the numbers according to the total of bombs around the cell.\n Also it will give colors to some numbers\n \"\"\"\n text_color = 0, 0, 0\n if self.bombs_around == 1:\n text_color = 0, 0, 150\n if self.bombs_around == 2:\n text_color = 0, 150, 0\n if self.bombs_around == 3:\n text_color = 150, 0, 0\n if self.bombs_around == 4:\n text_color = 133, 39, 138\n if self.bombs_around == 5:\n text_color = 128, 0, 0\n if self.bombs_around == 6:\n text_color = 175, 238, 238\n if self.bombs_around == 7:\n text_color = 0, 0, 0\n if self.bombs_around == 8:\n text_color = 33, 161, 166\n font = pygame.font.Font('fonts/JetBrainsMono-Bold.ttf', 24)\n if self.bombs_around > 0 and self.revelada:\n text = font.render(str(self.bombs_around), False, text_color)\n self.game.screen.blit(text, (self.x + 12, self.y))\n <mask token>\n\n def draw_cell(self):\n pygame.draw.rect(self.game.screen, WHITE, (self.x, self.y, TILESIZE -\n 1, TILESIZE - 1))\n if self.revelada:\n if self.bomba:\n pygame.draw.rect(self.game.screen, RED, (self.x + 10, self.\n y + 10, TILESIZE - 23, TILESIZE - 23))\n else:\n pygame.draw.rect(self.game.screen, GRAY, (self.x, self.y, \n TILESIZE - 1, TILESIZE - 1))\n if self.flag_enabled and not self.revelada:\n self.game.flag.draw(self.game.screen, self.x + 10, self.y + 10)\n\n def get_mouse_pos(self):\n mouse = pygame.mouse.get_pos()\n return [mouse[0] // TILESIZE, mouse[1] // TILESIZE]\n",
"step-3": "<mask token>\n\n\nclass Cell:\n\n def __init__(self, game, x, y, bombs):\n self.game = game\n self.x = x\n self.y = y\n self.i = x // TILESIZE\n self.j = y // TILESIZE\n self.revelada = False\n self.bomba = False\n self.bombas_total = bombs\n self.bombs_around = 0\n self.flag_enabled = False\n\n def reveal(self):\n if not self.game.is_game_over:\n self.revelada = True\n if self.bombs_around == 0:\n self.flood()\n if self.bomba:\n self.game.is_game_over = True\n self.game.score = 0\n EFFECT.play()\n\n def check_neighbours(self, grid):\n \"\"\"\n This function will count how many bombs there is around a particular cell\n \"\"\"\n if self.bomba:\n self.bombs_around = -1\n return\n total = 0\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):\n neighbor = grid[i][j]\n if neighbor.bomba:\n total += 1\n self.bombs_around = total\n\n def flood(self):\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(self.game.grid) and j > -1 and j < len(\n self.game.grid[1]):\n neighbor = self.game.grid[i][j]\n if (not neighbor.revelada and not neighbor.flag_enabled and\n not self.game.is_game_over):\n neighbor.reveal()\n\n def enable_flag(self):\n self.flag_enabled = not self.flag_enabled\n if self.bomba:\n self.game.score += 1\n\n def draw_number(self):\n \"\"\"\n This function will draw the numbers according to the total of bombs around the cell.\n Also it will give colors to some numbers\n \"\"\"\n text_color = 0, 0, 0\n if self.bombs_around == 1:\n text_color = 0, 0, 150\n if self.bombs_around == 2:\n text_color = 0, 150, 0\n if self.bombs_around == 3:\n text_color = 150, 0, 0\n if self.bombs_around == 4:\n text_color = 133, 39, 138\n if self.bombs_around == 5:\n text_color = 128, 0, 0\n if self.bombs_around == 6:\n text_color = 175, 238, 238\n if self.bombs_around == 7:\n text_color = 0, 0, 0\n if self.bombs_around == 8:\n text_color = 33, 161, 166\n font = pygame.font.Font('fonts/JetBrainsMono-Bold.ttf', 24)\n if self.bombs_around > 0 and self.revelada:\n text = font.render(str(self.bombs_around), False, text_color)\n self.game.screen.blit(text, (self.x + 12, self.y))\n\n def set_bomb(self):\n \"\"\"\n This function will turn this cell into a cell with a bomb \n (just to keep organized)\n \"\"\"\n self.bomba = True\n\n def draw_cell(self):\n pygame.draw.rect(self.game.screen, WHITE, (self.x, self.y, TILESIZE -\n 1, TILESIZE - 1))\n if self.revelada:\n if self.bomba:\n pygame.draw.rect(self.game.screen, RED, (self.x + 10, self.\n y + 10, TILESIZE - 23, TILESIZE - 23))\n else:\n pygame.draw.rect(self.game.screen, GRAY, (self.x, self.y, \n TILESIZE - 1, TILESIZE - 1))\n if self.flag_enabled and not self.revelada:\n self.game.flag.draw(self.game.screen, self.x + 10, self.y + 10)\n\n def get_mouse_pos(self):\n mouse = pygame.mouse.get_pos()\n return [mouse[0] // TILESIZE, mouse[1] // TILESIZE]\n",
"step-4": "import pygame\nfrom settings import *\nimport random\n\n\nclass Cell:\n\n def __init__(self, game, x, y, bombs):\n self.game = game\n self.x = x\n self.y = y\n self.i = x // TILESIZE\n self.j = y // TILESIZE\n self.revelada = False\n self.bomba = False\n self.bombas_total = bombs\n self.bombs_around = 0\n self.flag_enabled = False\n\n def reveal(self):\n if not self.game.is_game_over:\n self.revelada = True\n if self.bombs_around == 0:\n self.flood()\n if self.bomba:\n self.game.is_game_over = True\n self.game.score = 0\n EFFECT.play()\n\n def check_neighbours(self, grid):\n \"\"\"\n This function will count how many bombs there is around a particular cell\n \"\"\"\n if self.bomba:\n self.bombs_around = -1\n return\n total = 0\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):\n neighbor = grid[i][j]\n if neighbor.bomba:\n total += 1\n self.bombs_around = total\n\n def flood(self):\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(self.game.grid) and j > -1 and j < len(\n self.game.grid[1]):\n neighbor = self.game.grid[i][j]\n if (not neighbor.revelada and not neighbor.flag_enabled and\n not self.game.is_game_over):\n neighbor.reveal()\n\n def enable_flag(self):\n self.flag_enabled = not self.flag_enabled\n if self.bomba:\n self.game.score += 1\n\n def draw_number(self):\n \"\"\"\n This function will draw the numbers according to the total of bombs around the cell.\n Also it will give colors to some numbers\n \"\"\"\n text_color = 0, 0, 0\n if self.bombs_around == 1:\n text_color = 0, 0, 150\n if self.bombs_around == 2:\n text_color = 0, 150, 0\n if self.bombs_around == 3:\n text_color = 150, 0, 0\n if self.bombs_around == 4:\n text_color = 133, 39, 138\n if self.bombs_around == 5:\n text_color = 128, 0, 0\n if self.bombs_around == 6:\n text_color = 175, 238, 238\n if self.bombs_around == 7:\n text_color = 0, 0, 0\n if self.bombs_around == 8:\n text_color = 33, 161, 166\n font = pygame.font.Font('fonts/JetBrainsMono-Bold.ttf', 24)\n if self.bombs_around > 0 and self.revelada:\n text = font.render(str(self.bombs_around), False, text_color)\n self.game.screen.blit(text, (self.x + 12, self.y))\n\n def set_bomb(self):\n \"\"\"\n This function will turn this cell into a cell with a bomb \n (just to keep organized)\n \"\"\"\n self.bomba = True\n\n def draw_cell(self):\n pygame.draw.rect(self.game.screen, WHITE, (self.x, self.y, TILESIZE -\n 1, TILESIZE - 1))\n if self.revelada:\n if self.bomba:\n pygame.draw.rect(self.game.screen, RED, (self.x + 10, self.\n y + 10, TILESIZE - 23, TILESIZE - 23))\n else:\n pygame.draw.rect(self.game.screen, GRAY, (self.x, self.y, \n TILESIZE - 1, TILESIZE - 1))\n if self.flag_enabled and not self.revelada:\n self.game.flag.draw(self.game.screen, self.x + 10, self.y + 10)\n\n def get_mouse_pos(self):\n mouse = pygame.mouse.get_pos()\n return [mouse[0] // TILESIZE, mouse[1] // TILESIZE]\n",
"step-5": "import pygame\nfrom settings import *\nimport random\n\n\nclass Cell:\n def __init__(self, game, x, y, bombs):\n self.game = game\n self.x = x\n self.y = y\n self.i = x // TILESIZE\n self.j = y // TILESIZE\n self.revelada = False\n self.bomba = False\n self.bombas_total = bombs\n self.bombs_around = 0\n self.flag_enabled = False\n\n def reveal(self):\n if not self.game.is_game_over:\n self.revelada = True\n\n if self.bombs_around == 0:\n self.flood()\n if self.bomba:\n self.game.is_game_over = True\n self.game.score = 0\n EFFECT.play()\n\n def check_neighbours(self, grid):\n \"\"\"\n This function will count how many bombs there is around a particular cell\n \"\"\"\n if self.bomba:\n self.bombs_around = -1\n return\n\n total = 0\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):\n neighbor = grid[i][j]\n\n if neighbor.bomba:\n total += 1\n \n self.bombs_around = total\n\n def flood(self):\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(self.game.grid) and j > -1 and j < len(self.game.grid[1]):\n neighbor = self.game.grid[i][j]\n\n if not neighbor.revelada and not neighbor.flag_enabled and not self.game.is_game_over:\n neighbor.reveal()\n\n def enable_flag(self):\n self.flag_enabled = not self.flag_enabled\n if self.bomba: # TODO: and self.flag_enabled\n self.game.score += 1\n # TODO: else: self.game.score -= 1\n # all the spots revealed shouldn't be a bomb\n\n def draw_number(self):\n \"\"\"\n This function will draw the numbers according to the total of bombs around the cell.\n Also it will give colors to some numbers\n \"\"\"\n text_color = (0, 0, 0)\n if self.bombs_around == 1:\n text_color = (0, 0, 150)\n if self.bombs_around == 2:\n text_color = (0, 150, 0)\n if self.bombs_around == 3:\n text_color = (150, 0, 0)\n if self.bombs_around == 4:\n text_color = (133, 39, 138)\n if self.bombs_around == 5:\n text_color = (128, 0, 0)\n if self.bombs_around == 6:\n text_color = (175, 238, 238)\n if self.bombs_around == 7:\n text_color = (0, 0, 0)\n if self.bombs_around == 8:\n text_color = (33, 161, 166)\n\n font = pygame.font.Font(\"fonts/JetBrainsMono-Bold.ttf\", 24)\n if self.bombs_around > 0 and self.revelada:\n text = font.render(\n str(self.bombs_around), False, text_color)\n self.game.screen.blit(text, (self.x + 12, self.y))\n\n def set_bomb(self):\n \"\"\"\n This function will turn this cell into a cell with a bomb \n (just to keep organized)\n \"\"\"\n self.bomba = True\n\n def draw_cell(self):\n\n pygame.draw.rect(\n self.game.screen, WHITE, (self.x, self.y, TILESIZE - 1, TILESIZE - 1))\n\n if self.revelada:\n if self.bomba:\n pygame.draw.rect(\n self.game.screen, RED, (self.x + 10, self.y + 10, TILESIZE - 23, TILESIZE - 23))\n else:\n pygame.draw.rect(\n self.game.screen, GRAY, (self.x, self.y, TILESIZE - 1, TILESIZE - 1))\n if self.flag_enabled and not self.revelada:\n self.game.flag.draw(self.game.screen, self.x + 10, self.y + 10)\n\n def get_mouse_pos(self):\n mouse = pygame.mouse.get_pos()\n return [mouse[0] // TILESIZE, mouse[1] // TILESIZE]\n",
"step-ids": [
6,
9,
10,
11,
12
]
}
|
[
6,
9,
10,
11,
12
] |
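
The Cell.check_neighbours logic in the record above boils down to clamping a 3x3 window to the grid bounds and counting bombs inside it. A minimal, pygame-free sketch of that pattern (the grid layout and function name here are illustrative assumptions, not part of the record):

# Hypothetical standalone version of the neighbour count; `grid` is a 2D list
# of booleans where True marks a bomb.
def count_bombs_around(grid, i, j):
    total = 0
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if dx == 0 and dy == 0:
                continue  # the cell itself is not a neighbour
            x, y = i + dx, j + dy
            if 0 <= x < len(grid) and 0 <= y < len(grid[0]) and grid[x][y]:
                total += 1
    return total

# Example: a 3x3 field with a single bomb in the centre.
field = [[False, False, False],
         [False, True,  False],
         [False, False, False]]
assert count_bombs_around(field, 0, 0) == 1
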
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
assert len(sys.argv) == 2
<|reserved_special_token_0|>
print("""Converting maggies from catalog
%s""" % fname)
<|reserved_special_token_0|>
np.savetxt('./output/maggies.txt', to_exp)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
assert len(sys.argv) == 2
fname = sys.argv[1]
print("""Converting maggies from catalog
%s""" % fname)
df = pd.read_csv(fname)
z = df['z'].values
mod_u = df['cModelMag_u'].values
mod_g = df['cModelMag_g'].values
mod_r = df['cModelMag_r'].values
mod_i = df['cModelMag_i'].values
mod_z = df['cModelMag_z'].values
ext_u = df['extinction_u'].values
ext_g = df['extinction_g'].values
ext_r = df['extinction_r'].values
ext_i = df['extinction_i'].values
ext_z = df['extinction_z'].values
err_u = df['cModelMagErr_u'].values
err_g = df['cModelMagErr_g'].values
err_r = df['cModelMagErr_r'].values
err_i = df['cModelMagErr_i'].values
err_z = df['cModelMagErr_z'].values
dered_u = mod_u - ext_u
dered_g = mod_g - ext_g
dered_r = mod_r - ext_r
dered_i = mod_i - ext_i
dered_z = mod_z - ext_z
b = np.array([1.4, 0.9, 1.2, 1.8, 7.4]) * 1e-10
flux_u = 2.0 * b[0] * np.sinh(-np.log(10.0) / 2.5 * dered_u - np.log(b[0]))
flux_g = 2.0 * b[1] * np.sinh(-np.log(10.0) / 2.5 * dered_g - np.log(b[1]))
flux_r = 2.0 * b[2] * np.sinh(-np.log(10.0) / 2.5 * dered_r - np.log(b[2]))
flux_i = 2.0 * b[3] * np.sinh(-np.log(10.0) / 2.5 * dered_i - np.log(b[3]))
flux_z = 2.0 * b[4] * np.sinh(-np.log(10.0) / 2.5 * dered_z - np.log(b[4]))
ivar_u = 2.0 * b[0] * np.cosh(-np.log(10.0) / 2.5 * dered_u - np.log(b[0])) * (
-np.log(10) / 2.5) * err_u
ivar_g = 2.0 * b[1] * np.cosh(-np.log(10.0) / 2.5 * dered_g - np.log(b[1])) * (
-np.log(10) / 2.5) * err_g
ivar_r = 2.0 * b[2] * np.cosh(-np.log(10.0) / 2.5 * dered_r - np.log(b[2])) * (
-np.log(10) / 2.5) * err_r
ivar_i = 2.0 * b[3] * np.cosh(-np.log(10.0) / 2.5 * dered_i - np.log(b[3])) * (
-np.log(10) / 2.5) * err_i
ivar_z = 2.0 * b[4] * np.cosh(-np.log(10.0) / 2.5 * dered_z - np.log(b[4])) * (
-np.log(10) / 2.5) * err_z
ivar_u = 1.0 / ivar_u ** 2.0
ivar_g = 1.0 / ivar_g ** 2.0
ivar_r = 1.0 / ivar_r ** 2.0
ivar_i = 1.0 / ivar_i ** 2.0
ivar_z = 1.0 / ivar_z ** 2.0
to_exp = np.transpose([z, flux_u, flux_g, flux_r, flux_i, flux_z, ivar_u,
ivar_g, ivar_r, ivar_i, ivar_z])
np.savetxt('./output/maggies.txt', to_exp)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
import pandas as pd
import sys
assert len(sys.argv) == 2
fname = sys.argv[1]
print("""Converting maggies from catalog
%s""" % fname)
df = pd.read_csv(fname)
z = df['z'].values
mod_u = df['cModelMag_u'].values
mod_g = df['cModelMag_g'].values
mod_r = df['cModelMag_r'].values
mod_i = df['cModelMag_i'].values
mod_z = df['cModelMag_z'].values
ext_u = df['extinction_u'].values
ext_g = df['extinction_g'].values
ext_r = df['extinction_r'].values
ext_i = df['extinction_i'].values
ext_z = df['extinction_z'].values
err_u = df['cModelMagErr_u'].values
err_g = df['cModelMagErr_g'].values
err_r = df['cModelMagErr_r'].values
err_i = df['cModelMagErr_i'].values
err_z = df['cModelMagErr_z'].values
dered_u = mod_u - ext_u
dered_g = mod_g - ext_g
dered_r = mod_r - ext_r
dered_i = mod_i - ext_i
dered_z = mod_z - ext_z
b = np.array([1.4, 0.9, 1.2, 1.8, 7.4]) * 1e-10
flux_u = 2.0 * b[0] * np.sinh(-np.log(10.0) / 2.5 * dered_u - np.log(b[0]))
flux_g = 2.0 * b[1] * np.sinh(-np.log(10.0) / 2.5 * dered_g - np.log(b[1]))
flux_r = 2.0 * b[2] * np.sinh(-np.log(10.0) / 2.5 * dered_r - np.log(b[2]))
flux_i = 2.0 * b[3] * np.sinh(-np.log(10.0) / 2.5 * dered_i - np.log(b[3]))
flux_z = 2.0 * b[4] * np.sinh(-np.log(10.0) / 2.5 * dered_z - np.log(b[4]))
ivar_u = 2.0 * b[0] * np.cosh(-np.log(10.0) / 2.5 * dered_u - np.log(b[0])) * (
-np.log(10) / 2.5) * err_u
ivar_g = 2.0 * b[1] * np.cosh(-np.log(10.0) / 2.5 * dered_g - np.log(b[1])) * (
-np.log(10) / 2.5) * err_g
ivar_r = 2.0 * b[2] * np.cosh(-np.log(10.0) / 2.5 * dered_r - np.log(b[2])) * (
-np.log(10) / 2.5) * err_r
ivar_i = 2.0 * b[3] * np.cosh(-np.log(10.0) / 2.5 * dered_i - np.log(b[3])) * (
-np.log(10) / 2.5) * err_i
ivar_z = 2.0 * b[4] * np.cosh(-np.log(10.0) / 2.5 * dered_z - np.log(b[4])) * (
-np.log(10) / 2.5) * err_z
ivar_u = 1.0 / ivar_u ** 2.0
ivar_g = 1.0 / ivar_g ** 2.0
ivar_r = 1.0 / ivar_r ** 2.0
ivar_i = 1.0 / ivar_i ** 2.0
ivar_z = 1.0 / ivar_z ** 2.0
to_exp = np.transpose([z, flux_u, flux_g, flux_r, flux_i, flux_z, ivar_u,
ivar_g, ivar_r, ivar_i, ivar_z])
np.savetxt('./output/maggies.txt', to_exp)
<|reserved_special_token_1|>
''' Converts luptitudes to maggies and stores the result in the output folder
Written by P. Gallardo
'''
import numpy as np
import pandas as pd
import sys
assert len(sys.argv) == 2 # usage: lups2maggies.py /path/to/cat.csv
fname = sys.argv[1]
print("Converting maggies from catalog \n%s" % fname)
df = pd.read_csv(fname)
z = df['z'].values
mod_u = df['cModelMag_u'].values
mod_g = df['cModelMag_g'].values
mod_r = df['cModelMag_r'].values
mod_i = df['cModelMag_i'].values
mod_z = df['cModelMag_z'].values
ext_u = df['extinction_u'].values
ext_g = df['extinction_g'].values
ext_r = df['extinction_r'].values
ext_i = df['extinction_i'].values
ext_z = df['extinction_z'].values
err_u = df['cModelMagErr_u'].values
err_g = df['cModelMagErr_g'].values
err_r = df['cModelMagErr_r'].values
err_i = df['cModelMagErr_i'].values
err_z = df['cModelMagErr_z'].values
dered_u = mod_u - ext_u
dered_g = mod_g - ext_g
dered_r = mod_r - ext_r
dered_i = mod_i - ext_i
dered_z = mod_z - ext_z
b = np.array([1.4, 0.9, 1.2, 1.8, 7.4]) * 1e-10
flux_u = 2.*b[0] * np.sinh(-np.log(10.)/2.5*dered_u-np.log(b[0]))
flux_g = 2.*b[1] * np.sinh(-np.log(10.)/2.5*dered_g-np.log(b[1]))
flux_r = 2.*b[2] * np.sinh(-np.log(10.)/2.5*dered_r-np.log(b[2]))
flux_i = 2.*b[3] * np.sinh(-np.log(10.)/2.5*dered_i-np.log(b[3]))
flux_z = 2.*b[4] * np.sinh(-np.log(10.)/2.5*dered_z-np.log(b[4]))
ivar_u = 2.*b[0]*np.cosh(-np.log(10.)/2.5*dered_u-np.log(b[0]))*(-np.log(10)/2.5)*err_u # noqa
ivar_g = 2.*b[1]*np.cosh(-np.log(10.)/2.5*dered_g-np.log(b[1]))*(-np.log(10)/2.5)*err_g # noqa
ivar_r = 2.*b[2]*np.cosh(-np.log(10.)/2.5*dered_r-np.log(b[2]))*(-np.log(10)/2.5)*err_r # noqa
ivar_i = 2.*b[3]*np.cosh(-np.log(10.)/2.5*dered_i-np.log(b[3]))*(-np.log(10)/2.5)*err_i # noqa
ivar_z = 2.*b[4]*np.cosh(-np.log(10.)/2.5*dered_z-np.log(b[4]))*(-np.log(10)/2.5)*err_z # noqa
ivar_u = 1./ivar_u**2.
ivar_g = 1./ivar_g**2.
ivar_r = 1./ivar_r**2.
ivar_i = 1./ivar_i**2.
ivar_z = 1./ivar_z**2.
to_exp = np.transpose([z, flux_u, flux_g, flux_r, flux_i, flux_z,
ivar_u, ivar_g, ivar_r, ivar_i, ivar_z])
np.savetxt('./output/maggies.txt',
to_exp)
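
The sinh and cosh expressions above invert the SDSS asinh ("luptitude") magnitude definition. A small round-trip sanity check of that inversion, assuming the same r-band softening parameter as b[2] above (not part of the conversion script):

import numpy as np

b_r = 1.2e-10   # r-band softening parameter, same value as b[2] above
flux = 1e-9     # arbitrary test flux in maggies
mag = -2.5 / np.log(10.) * (np.arcsinh(flux / (2. * b_r)) + np.log(b_r))
flux_back = 2. * b_r * np.sinh(-np.log(10.) / 2.5 * mag - np.log(b_r))
assert np.isclose(flux, flux_back)
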
|
flexible
|
{
"blob_id": "e8971b3d183ded99a5fc03f031ef807280b8cc7f",
"index": 1744,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nassert len(sys.argv) == 2\n<mask token>\nprint(\"\"\"Converting maggies from catalog \n%s\"\"\" % fname)\n<mask token>\nnp.savetxt('./output/maggies.txt', to_exp)\n",
"step-3": "<mask token>\nassert len(sys.argv) == 2\nfname = sys.argv[1]\nprint(\"\"\"Converting maggies from catalog \n%s\"\"\" % fname)\ndf = pd.read_csv(fname)\nz = df['z'].values\nmod_u = df['cModelMag_u'].values\nmod_g = df['cModelMag_g'].values\nmod_r = df['cModelMag_r'].values\nmod_i = df['cModelMag_i'].values\nmod_z = df['cModelMag_z'].values\next_u = df['extinction_u'].values\next_g = df['extinction_g'].values\next_r = df['extinction_r'].values\next_i = df['extinction_i'].values\next_z = df['extinction_z'].values\nerr_u = df['cModelMagErr_u'].values\nerr_g = df['cModelMagErr_g'].values\nerr_r = df['cModelMagErr_r'].values\nerr_i = df['cModelMagErr_i'].values\nerr_z = df['cModelMagErr_z'].values\ndered_u = mod_u - ext_u\ndered_g = mod_g - ext_g\ndered_r = mod_r - ext_r\ndered_i = mod_i - ext_i\ndered_z = mod_z - ext_z\nb = np.array([1.4, 0.9, 1.2, 1.8, 7.4]) * 1e-10\nflux_u = 2.0 * b[0] * np.sinh(-np.log(10.0) / 2.5 * dered_u - np.log(b[0]))\nflux_g = 2.0 * b[1] * np.sinh(-np.log(10.0) / 2.5 * dered_g - np.log(b[1]))\nflux_r = 2.0 * b[2] * np.sinh(-np.log(10.0) / 2.5 * dered_r - np.log(b[2]))\nflux_i = 2.0 * b[3] * np.sinh(-np.log(10.0) / 2.5 * dered_i - np.log(b[3]))\nflux_z = 2.0 * b[4] * np.sinh(-np.log(10.0) / 2.5 * dered_z - np.log(b[4]))\nivar_u = 2.0 * b[0] * np.cosh(-np.log(10.0) / 2.5 * dered_u - np.log(b[0])) * (\n -np.log(10) / 2.5) * err_u\nivar_g = 2.0 * b[1] * np.cosh(-np.log(10.0) / 2.5 * dered_g - np.log(b[1])) * (\n -np.log(10) / 2.5) * err_g\nivar_r = 2.0 * b[2] * np.cosh(-np.log(10.0) / 2.5 * dered_r - np.log(b[2])) * (\n -np.log(10) / 2.5) * err_r\nivar_i = 2.0 * b[3] * np.cosh(-np.log(10.0) / 2.5 * dered_i - np.log(b[3])) * (\n -np.log(10) / 2.5) * err_i\nivar_z = 2.0 * b[4] * np.cosh(-np.log(10.0) / 2.5 * dered_z - np.log(b[4])) * (\n -np.log(10) / 2.5) * err_z\nivar_u = 1.0 / ivar_u ** 2.0\nivar_g = 1.0 / ivar_g ** 2.0\nivar_r = 1.0 / ivar_r ** 2.0\nivar_i = 1.0 / ivar_i ** 2.0\nivar_z = 1.0 / ivar_z ** 2.0\nto_exp = np.transpose([z, flux_u, flux_g, flux_r, flux_i, flux_z, ivar_u,\n ivar_g, ivar_r, ivar_i, ivar_z])\nnp.savetxt('./output/maggies.txt', to_exp)\n",
"step-4": "<mask token>\nimport numpy as np\nimport pandas as pd\nimport sys\nassert len(sys.argv) == 2\nfname = sys.argv[1]\nprint(\"\"\"Converting maggies from catalog \n%s\"\"\" % fname)\ndf = pd.read_csv(fname)\nz = df['z'].values\nmod_u = df['cModelMag_u'].values\nmod_g = df['cModelMag_g'].values\nmod_r = df['cModelMag_r'].values\nmod_i = df['cModelMag_i'].values\nmod_z = df['cModelMag_z'].values\next_u = df['extinction_u'].values\next_g = df['extinction_g'].values\next_r = df['extinction_r'].values\next_i = df['extinction_i'].values\next_z = df['extinction_z'].values\nerr_u = df['cModelMagErr_u'].values\nerr_g = df['cModelMagErr_g'].values\nerr_r = df['cModelMagErr_r'].values\nerr_i = df['cModelMagErr_i'].values\nerr_z = df['cModelMagErr_z'].values\ndered_u = mod_u - ext_u\ndered_g = mod_g - ext_g\ndered_r = mod_r - ext_r\ndered_i = mod_i - ext_i\ndered_z = mod_z - ext_z\nb = np.array([1.4, 0.9, 1.2, 1.8, 7.4]) * 1e-10\nflux_u = 2.0 * b[0] * np.sinh(-np.log(10.0) / 2.5 * dered_u - np.log(b[0]))\nflux_g = 2.0 * b[1] * np.sinh(-np.log(10.0) / 2.5 * dered_g - np.log(b[1]))\nflux_r = 2.0 * b[2] * np.sinh(-np.log(10.0) / 2.5 * dered_r - np.log(b[2]))\nflux_i = 2.0 * b[3] * np.sinh(-np.log(10.0) / 2.5 * dered_i - np.log(b[3]))\nflux_z = 2.0 * b[4] * np.sinh(-np.log(10.0) / 2.5 * dered_z - np.log(b[4]))\nivar_u = 2.0 * b[0] * np.cosh(-np.log(10.0) / 2.5 * dered_u - np.log(b[0])) * (\n -np.log(10) / 2.5) * err_u\nivar_g = 2.0 * b[1] * np.cosh(-np.log(10.0) / 2.5 * dered_g - np.log(b[1])) * (\n -np.log(10) / 2.5) * err_g\nivar_r = 2.0 * b[2] * np.cosh(-np.log(10.0) / 2.5 * dered_r - np.log(b[2])) * (\n -np.log(10) / 2.5) * err_r\nivar_i = 2.0 * b[3] * np.cosh(-np.log(10.0) / 2.5 * dered_i - np.log(b[3])) * (\n -np.log(10) / 2.5) * err_i\nivar_z = 2.0 * b[4] * np.cosh(-np.log(10.0) / 2.5 * dered_z - np.log(b[4])) * (\n -np.log(10) / 2.5) * err_z\nivar_u = 1.0 / ivar_u ** 2.0\nivar_g = 1.0 / ivar_g ** 2.0\nivar_r = 1.0 / ivar_r ** 2.0\nivar_i = 1.0 / ivar_i ** 2.0\nivar_z = 1.0 / ivar_z ** 2.0\nto_exp = np.transpose([z, flux_u, flux_g, flux_r, flux_i, flux_z, ivar_u,\n ivar_g, ivar_r, ivar_i, ivar_z])\nnp.savetxt('./output/maggies.txt', to_exp)\n",
"step-5": "''' Converts luptitudes to maggies and stores in folder output\n Written by P. Gallardo\n'''\nimport numpy as np\nimport pandas as pd\nimport sys\n\nassert len(sys.argv) == 2 # usage: lups2maggies.py /path/to/cat.csv\nfname = sys.argv[1]\n\nprint(\"Converting maggies from catalog \\n%s\" % fname)\n\ndf = pd.read_csv(fname)\n\nz = df['z'].values\n\nmod_u = df['cModelMag_u'].values\nmod_g = df['cModelMag_g'].values\nmod_r = df['cModelMag_r'].values\nmod_i = df['cModelMag_i'].values\nmod_z = df['cModelMag_z'].values\n\next_u = df['extinction_u'].values\next_g = df['extinction_g'].values\next_r = df['extinction_r'].values\next_i = df['extinction_i'].values\next_z = df['extinction_z'].values\n\nerr_u = df['cModelMagErr_u'].values\nerr_g = df['cModelMagErr_g'].values\nerr_r = df['cModelMagErr_r'].values\nerr_i = df['cModelMagErr_i'].values\nerr_z = df['cModelMagErr_z'].values\n\n\ndered_u = mod_u - ext_u\ndered_g = mod_g - ext_g\ndered_r = mod_r - ext_r\ndered_i = mod_i - ext_i\ndered_z = mod_z - ext_z\n\nb = np.array([1.4, 0.9, 1.2, 1.8, 7.4]) * 1e-10\nflux_u = 2.*b[0] * np.sinh(-np.log(10.)/2.5*dered_u-np.log(b[0]))\nflux_g = 2.*b[1] * np.sinh(-np.log(10.)/2.5*dered_g-np.log(b[1]))\nflux_r = 2.*b[2] * np.sinh(-np.log(10.)/2.5*dered_r-np.log(b[2]))\nflux_i = 2.*b[3] * np.sinh(-np.log(10.)/2.5*dered_i-np.log(b[3]))\nflux_z = 2.*b[4] * np.sinh(-np.log(10.)/2.5*dered_z-np.log(b[4]))\n\n\nivar_u = 2.*b[0]*np.cosh(-np.log(10.)/2.5*dered_u-np.log(b[0]))*(-np.log(10)/2.5)*err_u # noqa\nivar_g = 2.*b[1]*np.cosh(-np.log(10.)/2.5*dered_g-np.log(b[1]))*(-np.log(10)/2.5)*err_g # noqa\nivar_r = 2.*b[2]*np.cosh(-np.log(10.)/2.5*dered_r-np.log(b[2]))*(-np.log(10)/2.5)*err_r # noqa\nivar_i = 2.*b[3]*np.cosh(-np.log(10.)/2.5*dered_i-np.log(b[3]))*(-np.log(10)/2.5)*err_i # noqa\nivar_z = 2.*b[4]*np.cosh(-np.log(10.)/2.5*dered_z-np.log(b[4]))*(-np.log(10)/2.5)*err_z # noqa\n\nivar_u = 1./ivar_u**2.\nivar_g = 1./ivar_g**2.\nivar_r = 1./ivar_r**2.\nivar_i = 1./ivar_i**2.\nivar_z = 1./ivar_z**2.\n\nto_exp = np.transpose([z, flux_u, flux_g, flux_r, flux_i, flux_z,\n ivar_u, ivar_g, ivar_r, ivar_i, ivar_z])\nnp.savetxt('./output/maggies.txt',\n to_exp)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
236. Lowest Common Ancestor of a Binary Tree
https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-tree/
Given a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.
According to the definition of LCA on Wikipedia:
“The lowest common ancestor is defined between two nodes p and q as the lowest node in T that
has both p and q as descendants (where we allow a node to be a descendant of itself).”
Given the following binary tree: root = [3,5,1,6,2,0,8,null,null,7,4]
Example 1:
Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 1
Output: 3
Explanation: The LCA of nodes 5 and 1 is 3.
Example 2:
Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 4
Output: 5
Explanation: The LCA of nodes 5 and 4 is 5, since a node can be a descendant of
itself according to the LCA definition.
Note:
All of the nodes' values will be unique.
p and q are different and both values will exist in the binary tree.
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def postorder(self, node: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
'''
@return: p, q, their lca, or None
Improvement: record how many nodes are found to do early return
'''
if not node:
return None
if node == p or node == q:
# node is p, q or their lca
return node
left = self.postorder(node.left, p, q)
right = self.postorder(node.right, p, q)
if left:
if right:
return node # p,q is in left and right, node is lca
else:
return left # left is p or q
else:
if right:
return right # right is p or q
else:
return None # p or q not in node or its children
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
        return self.postorder(root, p, q)
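
A small usage sketch (hypothetical driver code, not part of the submission) reproducing Example 1 from the problem statement:

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(3)
p = root.left = TreeNode(5)
q = root.right = TreeNode(1)
root.left.left = TreeNode(6)
root.left.right = TreeNode(2)

assert Solution().lowestCommonAncestor(root, p, q).val == 3  # LCA of 5 and 1 is 3
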
|
normal
|
{
"blob_id": "ec9184fa3562ef6015801edf316faa0097d1eb57",
"index": 4821,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def postorder(self, node: 'TreeNode', p: 'TreeNode', q: 'TreeNode'\n ) ->'TreeNode':\n \"\"\"\n @return: p, q, their lca, or None\n Improvement: record how many nodes are found to do early return\n \"\"\"\n if not node:\n return None\n if node == p or node == q:\n return node\n left = self.postorder(node.left, p, q)\n right = self.postorder(node.right, p, q)\n if left:\n if right:\n return node\n else:\n return left\n elif right:\n return right\n else:\n return None\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass Solution:\n\n def postorder(self, node: 'TreeNode', p: 'TreeNode', q: 'TreeNode'\n ) ->'TreeNode':\n \"\"\"\n @return: p, q, their lca, or None\n Improvement: record how many nodes are found to do early return\n \"\"\"\n if not node:\n return None\n if node == p or node == q:\n return node\n left = self.postorder(node.left, p, q)\n right = self.postorder(node.right, p, q)\n if left:\n if right:\n return node\n else:\n return left\n elif right:\n return right\n else:\n return None\n\n def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q:\n 'TreeNode') ->'TreeNode':\n return self.postorder(root, p, q)\n",
"step-5": "'''\n236. Lowest Common Ancestor of a Binary Tree\nhttps://leetcode.com/problems/lowest-common-ancestor-of-a-binary-tree/\n\nGiven a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.\n\nAccording to the definition of LCA on Wikipedia:\n“The lowest common ancestor is defined between two nodes p and q as the lowest node in T that\nhas both p and q as descendants (where we allow a node to be a descendant of itself).”\n\nGiven the following binary tree: root = [3,5,1,6,2,0,8,null,null,7,4]\n\nExample 1:\n\nInput: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 1\nOutput: 3\nExplanation: The LCA of nodes 5 and 1 is 3.\n \nExample 2:\n\nInput: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 4\nOutput: 5\nExplanation: The LCA of nodes 5 and 4 is 5, since a node can be a descendant of\nitself according to the LCA definition.\n \nNote:\n\nAll of the nodes' values will be unique.\np and q are different and both values will exist in the binary tree.\n'''\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def postorder(self, node: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n '''\n @return: p, q, their lca, or None\n Improvement: record how many nodes are found to do early return\n '''\n if not node:\n return None\n \n if node == p or node == q:\n # node is p, q or their lca\n return node\n \n left = self.postorder(node.left, p, q)\n right = self.postorder(node.right, p, q)\n \n if left:\n if right:\n return node # p,q is in left and right, node is lca\n else:\n return left # left is p or q\n else:\n if right:\n return right # right is p or q\n else:\n return None # p or q not in node or its children\n \n def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n return self.postorder(root, p , q)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import seaborn as sns
# In[2]:
df = pd.read_csv("ipl_matches.csv")
df.head()
# In[3]:
## -----data cleaning------
## remove unwanted columns
columns_to_remove = ['mid','batsman','bowler','striker','non-striker']
df.drop(labels=columns_to_remove,axis=1,inplace=True)
# In[4]:
df.head()
# In[5]:
df['bat_team'].unique()
# In[6]:
### keeping only consistant team
consistant_team = ['Kolkata Knight Riders','Chennai Super Kings','Rajasthan Royals', 'Mumbai Indians',
'Kings XI Punjab', 'Royal Challengers Bangalore','Delhi Daredevils','Sunrisers Hyderabad',]
# In[7]:
df = df[(df['bat_team'].isin(consistant_team)) & (df['bowl_team'].isin(consistant_team))]
# In[8]:
df.head()
# In[9]:
df = df[df['overs']>=5.0]
# In[10]:
df.head()
# In[11]:
### converting the 'date' column from string to datetime object
from datetime import datetime
df['date'] = df['date'].apply(lambda x: datetime.strptime(x, '%d-%m-%Y'))
# In[12]:
df.head()
# In[13]:
print(df['bat_team'].unique())
print(df['bowl_team'].unique())
# In[14]:
###-------data processing-------
### converting the categoral features using one hot encoding
encoded_df = pd.get_dummies(data=df,columns=['venue','bat_team','bowl_team'])
encoded_df.head()
# In[15]:
encoded_df.columns
# In[16]:
### rearranging the columns
encoded_df = encoded_df[['date','runs', 'wickets', 'overs', 'runs_last_5', 'wickets_last_5',
'venue_Barabati Stadium', 'venue_Brabourne Stadium',
'venue_Buffalo Park', 'venue_De Beers Diamond Oval',
'venue_Dr DY Patil Sports Academy',
'venue_Dr. Y.S. Rajasekhara Reddy ACA-VDCA Cricket Stadium',
'venue_Dubai International Cricket Stadium', 'venue_Eden Gardens',
'venue_Feroz Shah Kotla',
'venue_Himachal Pradesh Cricket Association Stadium',
'venue_Holkar Cricket Stadium',
'venue_JSCA International Stadium Complex', 'venue_Kingsmead',
'venue_M Chinnaswamy Stadium', 'venue_MA Chidambaram Stadium, Chepauk',
'venue_Maharashtra Cricket Association Stadium',
'venue_New Wanderers Stadium', 'venue_Newlands',
'venue_OUTsurance Oval',
'venue_Punjab Cricket Association IS Bindra Stadium, Mohali',
'venue_Punjab Cricket Association Stadium, Mohali',
'venue_Rajiv Gandhi International Stadium, Uppal',
'venue_Sardar Patel Stadium, Motera', 'venue_Sawai Mansingh Stadium',
'venue_Shaheed Veer Narayan Singh International Stadium',
'venue_Sharjah Cricket Stadium', 'venue_Sheikh Zayed Stadium',
"venue_St George's Park", 'venue_Subrata Roy Sahara Stadium',
'venue_SuperSport Park', 'venue_Wankhede Stadium',
'bat_team_Chennai Super Kings', 'bat_team_Delhi Daredevils',
'bat_team_Kings XI Punjab', 'bat_team_Kolkata Knight Riders',
'bat_team_Mumbai Indians', 'bat_team_Rajasthan Royals',
'bat_team_Royal Challengers Bangalore', 'bat_team_Sunrisers Hyderabad',
'bowl_team_Chennai Super Kings', 'bowl_team_Delhi Daredevils',
'bowl_team_Kings XI Punjab', 'bowl_team_Kolkata Knight Riders',
'bowl_team_Mumbai Indians', 'bowl_team_Rajasthan Royals',
'bowl_team_Royal Challengers Bangalore',
'bowl_team_Sunrisers Hyderabad', 'total']]
# In[17]:
encoded_df.head()
# In[18]:
### Splitting the data into train and test dataset
x_train = encoded_df.drop(labels=['total'],axis=1)[encoded_df['date'].dt.year <=2016]
x_test = encoded_df.drop(labels=['total'],axis=1)[encoded_df['date'].dt.year >=2017]
# In[19]:
y_train = encoded_df[encoded_df['date'].dt.year <=2016]['total'].values
y_test = encoded_df[encoded_df['date'].dt.year >=2017]['total'].values
# In[20]:
### removing the 'date' column
x_train.drop(labels='date',axis=1,inplace=True)
x_test.drop(labels='date',axis=1,inplace=True)
# In[25]:
### -----Model Building-----
### Linear Regression
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(x_train,y_train)
# In[26]:
### creating a pickel file for the classifier
import pickle
filename = 'model.pkl'
pickle.dump(regressor, open(filename, 'wb'))
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
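
A sketch of how the pickled regressor would be reloaded and scored on the 2017+ hold-out split built above (assumes model.pkl and the x_test/y_test variables from the earlier cells are still available):

import pickle
from sklearn.metrics import mean_absolute_error

with open('model.pkl', 'rb') as fh:
    model = pickle.load(fh)

predictions = model.predict(x_test)
print('MAE on 2017+ innings:', mean_absolute_error(y_test, predictions))
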
|
normal
|
{
"blob_id": "3b1b3cab1fa197f75812ca5b1f044909914212c0",
"index": 9050,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndf.head()\n<mask token>\ndf.drop(labels=columns_to_remove, axis=1, inplace=True)\ndf.head()\ndf['bat_team'].unique()\n<mask token>\ndf.head()\n<mask token>\ndf.head()\n<mask token>\ndf.head()\nprint(df['bat_team'].unique())\nprint(df['bowl_team'].unique())\n<mask token>\nencoded_df.head()\nencoded_df.columns\n<mask token>\nencoded_df.head()\n<mask token>\nx_train.drop(labels='date', axis=1, inplace=True)\nx_test.drop(labels='date', axis=1, inplace=True)\n<mask token>\nregressor.fit(x_train, y_train)\n<mask token>\npickle.dump(regressor, open(filename, 'wb'))\n",
"step-3": "<mask token>\ndf = pd.read_csv('ipl_matches.csv')\ndf.head()\ncolumns_to_remove = ['mid', 'batsman', 'bowler', 'striker', 'non-striker']\ndf.drop(labels=columns_to_remove, axis=1, inplace=True)\ndf.head()\ndf['bat_team'].unique()\nconsistant_team = ['Kolkata Knight Riders', 'Chennai Super Kings',\n 'Rajasthan Royals', 'Mumbai Indians', 'Kings XI Punjab',\n 'Royal Challengers Bangalore', 'Delhi Daredevils', 'Sunrisers Hyderabad']\ndf = df[df['bat_team'].isin(consistant_team) & df['bowl_team'].isin(\n consistant_team)]\ndf.head()\ndf = df[df['overs'] >= 5.0]\ndf.head()\n<mask token>\ndf['date'] = df['date'].apply(lambda x: datetime.strptime(x, '%d-%m-%Y'))\ndf.head()\nprint(df['bat_team'].unique())\nprint(df['bowl_team'].unique())\nencoded_df = pd.get_dummies(data=df, columns=['venue', 'bat_team', 'bowl_team']\n )\nencoded_df.head()\nencoded_df.columns\nencoded_df = encoded_df[['date', 'runs', 'wickets', 'overs', 'runs_last_5',\n 'wickets_last_5', 'venue_Barabati Stadium', 'venue_Brabourne Stadium',\n 'venue_Buffalo Park', 'venue_De Beers Diamond Oval',\n 'venue_Dr DY Patil Sports Academy',\n 'venue_Dr. Y.S. Rajasekhara Reddy ACA-VDCA Cricket Stadium',\n 'venue_Dubai International Cricket Stadium', 'venue_Eden Gardens',\n 'venue_Feroz Shah Kotla',\n 'venue_Himachal Pradesh Cricket Association Stadium',\n 'venue_Holkar Cricket Stadium',\n 'venue_JSCA International Stadium Complex', 'venue_Kingsmead',\n 'venue_M Chinnaswamy Stadium', 'venue_MA Chidambaram Stadium, Chepauk',\n 'venue_Maharashtra Cricket Association Stadium',\n 'venue_New Wanderers Stadium', 'venue_Newlands',\n 'venue_OUTsurance Oval',\n 'venue_Punjab Cricket Association IS Bindra Stadium, Mohali',\n 'venue_Punjab Cricket Association Stadium, Mohali',\n 'venue_Rajiv Gandhi International Stadium, Uppal',\n 'venue_Sardar Patel Stadium, Motera', 'venue_Sawai Mansingh Stadium',\n 'venue_Shaheed Veer Narayan Singh International Stadium',\n 'venue_Sharjah Cricket Stadium', 'venue_Sheikh Zayed Stadium',\n \"venue_St George's Park\", 'venue_Subrata Roy Sahara Stadium',\n 'venue_SuperSport Park', 'venue_Wankhede Stadium',\n 'bat_team_Chennai Super Kings', 'bat_team_Delhi Daredevils',\n 'bat_team_Kings XI Punjab', 'bat_team_Kolkata Knight Riders',\n 'bat_team_Mumbai Indians', 'bat_team_Rajasthan Royals',\n 'bat_team_Royal Challengers Bangalore', 'bat_team_Sunrisers Hyderabad',\n 'bowl_team_Chennai Super Kings', 'bowl_team_Delhi Daredevils',\n 'bowl_team_Kings XI Punjab', 'bowl_team_Kolkata Knight Riders',\n 'bowl_team_Mumbai Indians', 'bowl_team_Rajasthan Royals',\n 'bowl_team_Royal Challengers Bangalore',\n 'bowl_team_Sunrisers Hyderabad', 'total']]\nencoded_df.head()\nx_train = encoded_df.drop(labels=['total'], axis=1)[encoded_df['date'].dt.\n year <= 2016]\nx_test = encoded_df.drop(labels=['total'], axis=1)[encoded_df['date'].dt.\n year >= 2017]\ny_train = encoded_df[encoded_df['date'].dt.year <= 2016]['total'].values\ny_test = encoded_df[encoded_df['date'].dt.year >= 2017]['total'].values\nx_train.drop(labels='date', axis=1, inplace=True)\nx_test.drop(labels='date', axis=1, inplace=True)\n<mask token>\nregressor = LinearRegression()\nregressor.fit(x_train, y_train)\n<mask token>\nfilename = 'model.pkl'\npickle.dump(regressor, open(filename, 'wb'))\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport seaborn as sns\ndf = pd.read_csv('ipl_matches.csv')\ndf.head()\ncolumns_to_remove = ['mid', 'batsman', 'bowler', 'striker', 'non-striker']\ndf.drop(labels=columns_to_remove, axis=1, inplace=True)\ndf.head()\ndf['bat_team'].unique()\nconsistant_team = ['Kolkata Knight Riders', 'Chennai Super Kings',\n 'Rajasthan Royals', 'Mumbai Indians', 'Kings XI Punjab',\n 'Royal Challengers Bangalore', 'Delhi Daredevils', 'Sunrisers Hyderabad']\ndf = df[df['bat_team'].isin(consistant_team) & df['bowl_team'].isin(\n consistant_team)]\ndf.head()\ndf = df[df['overs'] >= 5.0]\ndf.head()\nfrom datetime import datetime\ndf['date'] = df['date'].apply(lambda x: datetime.strptime(x, '%d-%m-%Y'))\ndf.head()\nprint(df['bat_team'].unique())\nprint(df['bowl_team'].unique())\nencoded_df = pd.get_dummies(data=df, columns=['venue', 'bat_team', 'bowl_team']\n )\nencoded_df.head()\nencoded_df.columns\nencoded_df = encoded_df[['date', 'runs', 'wickets', 'overs', 'runs_last_5',\n 'wickets_last_5', 'venue_Barabati Stadium', 'venue_Brabourne Stadium',\n 'venue_Buffalo Park', 'venue_De Beers Diamond Oval',\n 'venue_Dr DY Patil Sports Academy',\n 'venue_Dr. Y.S. Rajasekhara Reddy ACA-VDCA Cricket Stadium',\n 'venue_Dubai International Cricket Stadium', 'venue_Eden Gardens',\n 'venue_Feroz Shah Kotla',\n 'venue_Himachal Pradesh Cricket Association Stadium',\n 'venue_Holkar Cricket Stadium',\n 'venue_JSCA International Stadium Complex', 'venue_Kingsmead',\n 'venue_M Chinnaswamy Stadium', 'venue_MA Chidambaram Stadium, Chepauk',\n 'venue_Maharashtra Cricket Association Stadium',\n 'venue_New Wanderers Stadium', 'venue_Newlands',\n 'venue_OUTsurance Oval',\n 'venue_Punjab Cricket Association IS Bindra Stadium, Mohali',\n 'venue_Punjab Cricket Association Stadium, Mohali',\n 'venue_Rajiv Gandhi International Stadium, Uppal',\n 'venue_Sardar Patel Stadium, Motera', 'venue_Sawai Mansingh Stadium',\n 'venue_Shaheed Veer Narayan Singh International Stadium',\n 'venue_Sharjah Cricket Stadium', 'venue_Sheikh Zayed Stadium',\n \"venue_St George's Park\", 'venue_Subrata Roy Sahara Stadium',\n 'venue_SuperSport Park', 'venue_Wankhede Stadium',\n 'bat_team_Chennai Super Kings', 'bat_team_Delhi Daredevils',\n 'bat_team_Kings XI Punjab', 'bat_team_Kolkata Knight Riders',\n 'bat_team_Mumbai Indians', 'bat_team_Rajasthan Royals',\n 'bat_team_Royal Challengers Bangalore', 'bat_team_Sunrisers Hyderabad',\n 'bowl_team_Chennai Super Kings', 'bowl_team_Delhi Daredevils',\n 'bowl_team_Kings XI Punjab', 'bowl_team_Kolkata Knight Riders',\n 'bowl_team_Mumbai Indians', 'bowl_team_Rajasthan Royals',\n 'bowl_team_Royal Challengers Bangalore',\n 'bowl_team_Sunrisers Hyderabad', 'total']]\nencoded_df.head()\nx_train = encoded_df.drop(labels=['total'], axis=1)[encoded_df['date'].dt.\n year <= 2016]\nx_test = encoded_df.drop(labels=['total'], axis=1)[encoded_df['date'].dt.\n year >= 2017]\ny_train = encoded_df[encoded_df['date'].dt.year <= 2016]['total'].values\ny_test = encoded_df[encoded_df['date'].dt.year >= 2017]['total'].values\nx_train.drop(labels='date', axis=1, inplace=True)\nx_test.drop(labels='date', axis=1, inplace=True)\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(x_train, y_train)\nimport pickle\nfilename = 'model.pkl'\npickle.dump(regressor, open(filename, 'wb'))\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\n\n\n# In[2]:\n\n\ndf = pd.read_csv(\"ipl_matches.csv\")\ndf.head()\n\n\n# In[3]:\n\n\n## -----data cleaning------\n## remove unwanted columns\n\ncolumns_to_remove = ['mid','batsman','bowler','striker','non-striker']\ndf.drop(labels=columns_to_remove,axis=1,inplace=True)\n\n\n# In[4]:\n\n\ndf.head()\n\n\n# In[5]:\n\n\ndf['bat_team'].unique()\n\n\n# In[6]:\n\n\n### keeping only consistant team\n\nconsistant_team = ['Kolkata Knight Riders','Chennai Super Kings','Rajasthan Royals', 'Mumbai Indians',\n 'Kings XI Punjab', 'Royal Challengers Bangalore','Delhi Daredevils','Sunrisers Hyderabad',]\n\n\n# In[7]:\n\n\ndf = df[(df['bat_team'].isin(consistant_team)) & (df['bowl_team'].isin(consistant_team))]\n\n\n# In[8]:\n\n\ndf.head()\n\n\n# In[9]:\n\n\ndf = df[df['overs']>=5.0]\n\n\n# In[10]:\n\n\ndf.head()\n\n\n# In[11]:\n\n\n### converting the 'date' column from string to datetime object\n\nfrom datetime import datetime\ndf['date'] = df['date'].apply(lambda x: datetime.strptime(x, '%d-%m-%Y'))\n\n\n# In[12]:\n\n\ndf.head()\n\n\n# In[13]:\n\n\nprint(df['bat_team'].unique())\nprint(df['bowl_team'].unique())\n\n\n# In[14]:\n\n\n###-------data processing-------\n### converting the categoral features using one hot encoding\n\nencoded_df = pd.get_dummies(data=df,columns=['venue','bat_team','bowl_team'])\nencoded_df.head()\n\n\n# In[15]:\n\n\nencoded_df.columns\n\n\n# In[16]:\n\n\n### rearranging the columns\n\nencoded_df = encoded_df[['date','runs', 'wickets', 'overs', 'runs_last_5', 'wickets_last_5',\n 'venue_Barabati Stadium', 'venue_Brabourne Stadium',\n 'venue_Buffalo Park', 'venue_De Beers Diamond Oval',\n 'venue_Dr DY Patil Sports Academy',\n 'venue_Dr. Y.S. 
Rajasekhara Reddy ACA-VDCA Cricket Stadium',\n 'venue_Dubai International Cricket Stadium', 'venue_Eden Gardens',\n 'venue_Feroz Shah Kotla',\n 'venue_Himachal Pradesh Cricket Association Stadium',\n 'venue_Holkar Cricket Stadium',\n 'venue_JSCA International Stadium Complex', 'venue_Kingsmead',\n 'venue_M Chinnaswamy Stadium', 'venue_MA Chidambaram Stadium, Chepauk',\n 'venue_Maharashtra Cricket Association Stadium',\n 'venue_New Wanderers Stadium', 'venue_Newlands',\n 'venue_OUTsurance Oval',\n 'venue_Punjab Cricket Association IS Bindra Stadium, Mohali',\n 'venue_Punjab Cricket Association Stadium, Mohali',\n 'venue_Rajiv Gandhi International Stadium, Uppal',\n 'venue_Sardar Patel Stadium, Motera', 'venue_Sawai Mansingh Stadium',\n 'venue_Shaheed Veer Narayan Singh International Stadium',\n 'venue_Sharjah Cricket Stadium', 'venue_Sheikh Zayed Stadium',\n \"venue_St George's Park\", 'venue_Subrata Roy Sahara Stadium',\n 'venue_SuperSport Park', 'venue_Wankhede Stadium',\n 'bat_team_Chennai Super Kings', 'bat_team_Delhi Daredevils',\n 'bat_team_Kings XI Punjab', 'bat_team_Kolkata Knight Riders',\n 'bat_team_Mumbai Indians', 'bat_team_Rajasthan Royals',\n 'bat_team_Royal Challengers Bangalore', 'bat_team_Sunrisers Hyderabad',\n 'bowl_team_Chennai Super Kings', 'bowl_team_Delhi Daredevils',\n 'bowl_team_Kings XI Punjab', 'bowl_team_Kolkata Knight Riders',\n 'bowl_team_Mumbai Indians', 'bowl_team_Rajasthan Royals',\n 'bowl_team_Royal Challengers Bangalore',\n 'bowl_team_Sunrisers Hyderabad', 'total']]\n\n\n# In[17]:\n\n\nencoded_df.head()\n\n\n# In[18]:\n\n\n### Splitting the data into train and test dataset\n\nx_train = encoded_df.drop(labels=['total'],axis=1)[encoded_df['date'].dt.year <=2016]\nx_test = encoded_df.drop(labels=['total'],axis=1)[encoded_df['date'].dt.year >=2017]\n\n\n# In[19]:\n\n\ny_train = encoded_df[encoded_df['date'].dt.year <=2016]['total'].values\ny_test = encoded_df[encoded_df['date'].dt.year >=2017]['total'].values\n\n\n# In[20]:\n\n\n### removing the 'date' column\n\nx_train.drop(labels='date',axis=1,inplace=True)\nx_test.drop(labels='date',axis=1,inplace=True)\n\n\n# In[25]:\n\n\n### -----Model Building-----\n### Linear Regression\n\nfrom sklearn.linear_model import LinearRegression\n\nregressor = LinearRegression()\nregressor.fit(x_train,y_train)\n\n\n# In[26]:\n\n\n### creating a pickel file for the classifier\n\nimport pickle\nfilename = 'model.pkl'\npickle.dump(regressor, open(filename, 'wb'))\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class MSITableColumnInfo(NamedTuple):
<|reserved_special_token_0|>
number: int
attributes: int
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def length(self) ->int:
vt = self.type
if vt is MsiType.Long:
return 4
if vt is MsiType.Short:
return 2
return self.attributes & 255
<|reserved_special_token_0|>
class MSIStringData:
def __init__(self, string_data: ByteStr, string_pool: ByteStr):
data = StructReader(string_data)
pool = StructReader(string_pool)
self.strings: List[bytes] = []
self.provided_ref_count: List[int] = []
self.computed_ref_count: List[int] = []
self.codepage = pool.u16()
self._unknown = pool.u16()
while not pool.eof:
size, rc = pool.read_struct('<HH')
string = data.read_bytes(size)
self.strings.append(string)
self.provided_ref_count.append(rc)
self.computed_ref_count.append(0)
@cached_property
def codec(self):
try:
return codecs.lookup(f'cp{self.codepage}').name
except Exception:
xtmsi.log_info('failed looking up codec', self.codepage)
return 'latin1'
def __len__(self):
return len(self.strings)
def __iter__(self):
yield from range(1, len(self) + 1)
def __contains__(self, index):
return 0 < index <= len(self)
def ref(self, index: int, increment=True) ->Union[str, bytes]:
assert index > 0
index -= 1
if increment:
self.computed_ref_count[index] += 1
data = self.strings[index]
data = data.decode(self.codec)
return data
class xtmsi(xtdoc):
"""
Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains
parsed MSI table information, similar to the output of the Orca tool. Binary streams are placed in a
virtual folder called "Binary", and extracted scripts from custom actions are separately extracted in
a virtual folder named "Action".
"""
_SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'
_CUSTOM_ACTION_TYPES = {(1):
'DLL file stored in a Binary table stream.', (2):
'EXE file stored in a Binary table stream.', (5):
'JScript file stored in a Binary table stream.', (6):
'VBScript file stored in a Binary table stream.', (17):
'DLL file that is installed with a product.', (18):
'EXE file that is installed with a product.', (19):
'Displays a specified error message and returns failure, terminating the installation.'
, (21): 'JScript file that is installed with a product.', (22):
'VBScript file that is installed with a product.', (34):
'EXE file having a path referencing a directory.', (35):
'Directory set with formatted text.', (37):
'JScript text stored in this sequence table.', (38):
'VBScript text stored in this sequence table.', (50):
'EXE file having a path specified by a property value.', (51):
'Property set with formatted text.', (53):
'JScript text specified by a property value.', (54):
'VBScript text specified by a property value.'}
def unpack(self, data):
streams = {result.path: result for result in super().unpack(data)}
def stream(name: str):
return streams.pop(name).get_data()
def column_formats(table: Dict[str, MSITableColumnInfo]) ->str:
return ''.join(v.struct_format for v in table.values())
def stream_to_rows(data: ByteStr, row_format: str):
row_size = struct.calcsize(f'<{row_format}')
row_count = int(len(data) / row_size)
reader = StructReader(data)
columns = [reader.read_struct(f'<{sc * row_count}') for sc in
row_format]
for i in range(row_count):
yield [c[i] for c in columns]
tables: Dict[str, Dict[str, MSITableColumnInfo]
] = collections.defaultdict(collections.OrderedDict)
strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))
for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(
stream('!_Columns'), 'HHHH'):
tbl_name = strings.ref(tbl_name_id)
col_name = strings.ref(col_name_id)
tables[tbl_name][col_name] = MSITableColumnInfo(col_number,
col_attributes)
table_names_given = {strings.ref(k) for k in chunks.unpack(stream(
'!_Tables'), 2, False)}
table_names_known = set(tables)
for name in (table_names_known - table_names_given):
self.log_warn(f'table name known but not given: {name}')
for name in (table_names_given - table_names_known):
self.log_warn(f'table name given but not known: {name}')
class ScriptItem(NamedTuple):
row_index: int
extension: Optional[str]
processed_table_data: Dict[str, List[Dict[str, str]]] = {}
tbl_properties: Dict[str, str] = {}
tbl_files: Dict[str, str] = {}
tbl_components: Dict[str, str] = {}
postprocessing: List[ScriptItem] = []
def format_string(string: str):
def _replace(match: re.Match[str]):
_replace.done = False
prefix, name = match.groups()
if not prefix:
tbl = tbl_properties
elif prefix in '%':
name = name.rstrip('%').upper()
return f'%{name}%'
elif prefix in '!#':
tbl = tbl_files
elif prefix in '$':
tbl = tbl_components
else:
raise ValueError
return tbl.get(name, '')
while True:
_replace.done = True
string = re.sub(
"""(?x)
                \\[ # open square bracket
(?![~\\\\]) # not followed by escapes
([%$!#]?) # any of the valid prefix characters
([^[\\]{}]+) # no brackets or braces
\\]"""
, _replace, string)
if _replace.done:
break
string = re.sub('\\[\\\\(.)\\]', '\\1', string)
string = string.replace('[~]', '\x00')
return string
for table_name, table in tables.items():
stream_name = f'!{table_name}'
if stream_name not in streams:
continue
processed = []
info = list(table.values())
for r, row in enumerate(stream_to_rows(stream(stream_name),
column_formats(table))):
values = []
for index, value in enumerate(row):
vt = info[index].type
if vt is MsiType.Long:
if value != 0:
value -= 2147483648
elif vt is MsiType.Short:
if value != 0:
value -= 32768
elif value in strings:
value = strings.ref(value)
elif not info[index].is_integer:
value = ''
values.append(value)
if table_name == 'Property':
tbl_properties[values[0]] = values[1]
if table_name == 'File':
                    tbl_files[values[0]] = values[2]
if table_name == 'Component':
                    tbl_components[values[0]] = f'%{values[2]}%'
entry = dict(zip(table, values))
einfo = {t: i for t, i in zip(table, info)}
if table_name == 'MsiFileHash':
entry['Hash'] = struct.pack('<IIII', row[2] ^
2147483648, row[3] ^ 2147483648, row[4] ^
2147483648, row[5] ^ 2147483648).hex()
if table_name == 'CustomAction':
code = row[1] & 63
try:
entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]
except LookupError:
pass
t = einfo.get('Target')
c = {(37): 'js', (38): 'vbs', (51): None}
if code in c and t and not t.is_integer:
postprocessing.append(ScriptItem(r, c[code]))
processed.append(entry)
if processed:
processed_table_data[table_name] = processed
ca = processed_table_data.get('CustomAction', None)
for item in postprocessing:
entry = ca[item.row_index]
try:
path: str = entry['Action']
data: str = entry['Target']
except KeyError:
continue
root = f'Action/{path}'
if item.extension:
path = f'{root}.{item.extension}'
streams[path] = UnpackResult(path, data.encode(self.codec))
continue
data = format_string(data)
parts = [part.partition('\x02') for part in data.split('\x01')]
if not all(part[1] == '\x02' for part in parts):
continue
for name, _, script in parts:
if not name.lower().startswith('script'):
continue
if not script:
continue
path = f'{root}.{name}'
streams[path] = UnpackResult(path, script.encode(self.codec))
for ignored_stream in ['[5]SummaryInformation',
'[5]DocumentSummaryInformation', '[5]DigitalSignature',
'[5]MsiDigitalSignatureEx']:
streams.pop(ignored_stream, None)
inconsistencies = 0
for k in range(len(strings)):
c = strings.computed_ref_count[k]
p = strings.provided_ref_count[k]
if c != p and not self.log_debug(
f'string reference count computed={c} provided={p}:',
strings.ref(k + 1, False)):
inconsistencies += 1
if inconsistencies:
self.log_info(
f'found {inconsistencies} incorrect string reference counts')
def fix_msi_path(path: str):
prefix, dot, name = path.partition('.')
if dot == '.' and prefix.lower() == 'binary':
path = f'{prefix}/{name}'
return path
streams = {fix_msi_path(path): item for path, item in streams.items()}
ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME, json.dumps(
processed_table_data, indent=4).encode(self.codec))
streams[ds.path] = ds
for path in sorted(streams):
streams[path].path = path
yield streams[path]
@classmethod
def handles(self, data: bytearray):
if not data.startswith(b'\xd0\xcf\x11\xe0'):
return False
return FileMagicInfo(data).extension == 'msi'
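
For reference, the column `attributes` word decoded by MSITableColumnInfo packs the value type in the low bits with flag bits above it (0x2000 = key, 0x1000 = nullable, 0x0F00 = type class, 0xFF = declared length for string columns). A small illustration with a made-up attribute value:

attributes = 0x3D48                           # hypothetical nullable key string column
is_key      = bool(attributes & 0x2000)       # True
is_nullable = bool(attributes & 0x1000)       # True
is_integer  = (attributes & 0x0F00) < 0x0800  # False, so it is a string column
length      = attributes & 0xFF               # 72 characters declared
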
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MsiType(enum.IntEnum):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class MSITableColumnInfo(NamedTuple):
"""
Represents information about an MSI table column. See also:
https://doxygen.reactos.org/db/de4/msipriv_8h.html
"""
number: int
attributes: int
@property
def type(self) ->MsiType:
try:
if self.is_integer:
return MsiType(self.attributes & 4095)
else:
return MsiType(self.attributes & 3840)
except Exception:
return MsiType.Unknown
@property
def is_integer(self) ->bool:
return self.attributes & 3840 < 2048
@property
def is_key(self) ->bool:
return self.attributes & 8192 == 8192
@property
def is_nullable(self) ->bool:
return self.attributes & 4096 == 4096
@property
def length(self) ->int:
vt = self.type
if vt is MsiType.Long:
return 4
if vt is MsiType.Short:
return 2
return self.attributes & 255
@property
def struct_format(self) ->str:
vt = self.type
if vt is MsiType.Long:
return 'I'
elif vt is MsiType.Short:
return 'H'
else:
return 'H'
class MSIStringData:
def __init__(self, string_data: ByteStr, string_pool: ByteStr):
data = StructReader(string_data)
pool = StructReader(string_pool)
self.strings: List[bytes] = []
self.provided_ref_count: List[int] = []
self.computed_ref_count: List[int] = []
self.codepage = pool.u16()
self._unknown = pool.u16()
while not pool.eof:
size, rc = pool.read_struct('<HH')
string = data.read_bytes(size)
self.strings.append(string)
self.provided_ref_count.append(rc)
self.computed_ref_count.append(0)
@cached_property
def codec(self):
try:
return codecs.lookup(f'cp{self.codepage}').name
except Exception:
xtmsi.log_info('failed looking up codec', self.codepage)
return 'latin1'
def __len__(self):
return len(self.strings)
def __iter__(self):
yield from range(1, len(self) + 1)
def __contains__(self, index):
return 0 < index <= len(self)
def ref(self, index: int, increment=True) ->Union[str, bytes]:
assert index > 0
index -= 1
if increment:
self.computed_ref_count[index] += 1
data = self.strings[index]
data = data.decode(self.codec)
return data
class xtmsi(xtdoc):
"""
Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains
parsed MSI table information, similar to the output of the Orca tool. Binary streams are placed in a
virtual folder called "Binary", and extracted scripts from custom actions are separately extracted in
a virtual folder named "Action".
"""
_SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'
_CUSTOM_ACTION_TYPES = {(1):
'DLL file stored in a Binary table stream.', (2):
'EXE file stored in a Binary table stream.', (5):
'JScript file stored in a Binary table stream.', (6):
'VBScript file stored in a Binary table stream.', (17):
'DLL file that is installed with a product.', (18):
'EXE file that is installed with a product.', (19):
'Displays a specified error message and returns failure, terminating the installation.'
, (21): 'JScript file that is installed with a product.', (22):
'VBScript file that is installed with a product.', (34):
'EXE file having a path referencing a directory.', (35):
'Directory set with formatted text.', (37):
'JScript text stored in this sequence table.', (38):
'VBScript text stored in this sequence table.', (50):
'EXE file having a path specified by a property value.', (51):
'Property set with formatted text.', (53):
'JScript text specified by a property value.', (54):
'VBScript text specified by a property value.'}
def unpack(self, data):
streams = {result.path: result for result in super().unpack(data)}
def stream(name: str):
return streams.pop(name).get_data()
def column_formats(table: Dict[str, MSITableColumnInfo]) ->str:
return ''.join(v.struct_format for v in table.values())
def stream_to_rows(data: ByteStr, row_format: str):
row_size = struct.calcsize(f'<{row_format}')
row_count = int(len(data) / row_size)
reader = StructReader(data)
columns = [reader.read_struct(f'<{sc * row_count}') for sc in
row_format]
for i in range(row_count):
yield [c[i] for c in columns]
tables: Dict[str, Dict[str, MSITableColumnInfo]
] = collections.defaultdict(collections.OrderedDict)
strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))
for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(
stream('!_Columns'), 'HHHH'):
tbl_name = strings.ref(tbl_name_id)
col_name = strings.ref(col_name_id)
tables[tbl_name][col_name] = MSITableColumnInfo(col_number,
col_attributes)
table_names_given = {strings.ref(k) for k in chunks.unpack(stream(
'!_Tables'), 2, False)}
table_names_known = set(tables)
for name in (table_names_known - table_names_given):
self.log_warn(f'table name known but not given: {name}')
for name in (table_names_given - table_names_known):
self.log_warn(f'table name given but not known: {name}')
class ScriptItem(NamedTuple):
row_index: int
extension: Optional[str]
processed_table_data: Dict[str, List[Dict[str, str]]] = {}
tbl_properties: Dict[str, str] = {}
tbl_files: Dict[str, str] = {}
tbl_components: Dict[str, str] = {}
postprocessing: List[ScriptItem] = []
def format_string(string: str):
def _replace(match: re.Match[str]):
_replace.done = False
prefix, name = match.groups()
if not prefix:
tbl = tbl_properties
elif prefix in '%':
name = name.rstrip('%').upper()
return f'%{name}%'
elif prefix in '!#':
tbl = tbl_files
elif prefix in '$':
tbl = tbl_components
else:
raise ValueError
return tbl.get(name, '')
while True:
_replace.done = True
string = re.sub(
"""(?x)
                \\[ # open square bracket
(?![~\\\\]) # not followed by escapes
([%$!#]?) # any of the valid prefix characters
([^[\\]{}]+) # no brackets or braces
\\]"""
, _replace, string)
if _replace.done:
break
string = re.sub('\\[\\\\(.)\\]', '\\1', string)
string = string.replace('[~]', '\x00')
return string
for table_name, table in tables.items():
stream_name = f'!{table_name}'
if stream_name not in streams:
continue
processed = []
info = list(table.values())
for r, row in enumerate(stream_to_rows(stream(stream_name),
column_formats(table))):
values = []
for index, value in enumerate(row):
vt = info[index].type
if vt is MsiType.Long:
if value != 0:
value -= 2147483648
elif vt is MsiType.Short:
if value != 0:
value -= 32768
elif value in strings:
value = strings.ref(value)
elif not info[index].is_integer:
value = ''
values.append(value)
if table_name == 'Property':
tbl_properties[values[0]] = values[1]
                if table_name == 'File':
                    tbl_files[values[0]] = values[2]
                if table_name == 'Component':
                    tbl_components[values[0]] = f'%{values[2]}%'
entry = dict(zip(table, values))
einfo = {t: i for t, i in zip(table, info)}
if table_name == 'MsiFileHash':
entry['Hash'] = struct.pack('<IIII', row[2] ^
2147483648, row[3] ^ 2147483648, row[4] ^
2147483648, row[5] ^ 2147483648).hex()
if table_name == 'CustomAction':
code = row[1] & 63
try:
entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]
except LookupError:
pass
t = einfo.get('Target')
c = {(37): 'js', (38): 'vbs', (51): None}
if code in c and t and not t.is_integer:
postprocessing.append(ScriptItem(r, c[code]))
processed.append(entry)
if processed:
processed_table_data[table_name] = processed
ca = processed_table_data.get('CustomAction', None)
for item in postprocessing:
entry = ca[item.row_index]
try:
path: str = entry['Action']
data: str = entry['Target']
except KeyError:
continue
root = f'Action/{path}'
if item.extension:
path = f'{root}.{item.extension}'
streams[path] = UnpackResult(path, data.encode(self.codec))
continue
data = format_string(data)
parts = [part.partition('\x02') for part in data.split('\x01')]
if not all(part[1] == '\x02' for part in parts):
continue
for name, _, script in parts:
if not name.lower().startswith('script'):
continue
if not script:
continue
path = f'{root}.{name}'
streams[path] = UnpackResult(path, script.encode(self.codec))
for ignored_stream in ['[5]SummaryInformation',
'[5]DocumentSummaryInformation', '[5]DigitalSignature',
'[5]MsiDigitalSignatureEx']:
streams.pop(ignored_stream, None)
inconsistencies = 0
for k in range(len(strings)):
c = strings.computed_ref_count[k]
p = strings.provided_ref_count[k]
if c != p and not self.log_debug(
f'string reference count computed={c} provided={p}:',
strings.ref(k + 1, False)):
inconsistencies += 1
if inconsistencies:
self.log_info(
f'found {inconsistencies} incorrect string reference counts')
def fix_msi_path(path: str):
prefix, dot, name = path.partition('.')
if dot == '.' and prefix.lower() == 'binary':
path = f'{prefix}/{name}'
return path
streams = {fix_msi_path(path): item for path, item in streams.items()}
ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME, json.dumps(
processed_table_data, indent=4).encode(self.codec))
streams[ds.path] = ds
for path in sorted(streams):
streams[path].path = path
yield streams[path]
@classmethod
def handles(self, data: bytearray):
if not data.startswith(b'\xd0\xcf\x11\xe0'):
return False
return FileMagicInfo(data).extension == 'msi'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MsiType(enum.IntEnum):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return self.name
class MSITableColumnInfo(NamedTuple):
"""
Represents information about an MSI table column. See also:
https://doxygen.reactos.org/db/de4/msipriv_8h.html
"""
number: int
attributes: int
@property
def type(self) ->MsiType:
try:
if self.is_integer:
return MsiType(self.attributes & 4095)
else:
return MsiType(self.attributes & 3840)
except Exception:
return MsiType.Unknown
@property
def is_integer(self) ->bool:
return self.attributes & 3840 < 2048
@property
def is_key(self) ->bool:
return self.attributes & 8192 == 8192
@property
def is_nullable(self) ->bool:
return self.attributes & 4096 == 4096
@property
def length(self) ->int:
vt = self.type
if vt is MsiType.Long:
return 4
if vt is MsiType.Short:
return 2
return self.attributes & 255
@property
def struct_format(self) ->str:
vt = self.type
if vt is MsiType.Long:
return 'I'
elif vt is MsiType.Short:
return 'H'
else:
return 'H'
class MSIStringData:
def __init__(self, string_data: ByteStr, string_pool: ByteStr):
data = StructReader(string_data)
pool = StructReader(string_pool)
self.strings: List[bytes] = []
self.provided_ref_count: List[int] = []
self.computed_ref_count: List[int] = []
self.codepage = pool.u16()
self._unknown = pool.u16()
while not pool.eof:
size, rc = pool.read_struct('<HH')
string = data.read_bytes(size)
self.strings.append(string)
self.provided_ref_count.append(rc)
self.computed_ref_count.append(0)
@cached_property
def codec(self):
try:
return codecs.lookup(f'cp{self.codepage}').name
except Exception:
xtmsi.log_info('failed looking up codec', self.codepage)
return 'latin1'
def __len__(self):
return len(self.strings)
def __iter__(self):
yield from range(1, len(self) + 1)
def __contains__(self, index):
return 0 < index <= len(self)
def ref(self, index: int, increment=True) ->Union[str, bytes]:
assert index > 0
index -= 1
if increment:
self.computed_ref_count[index] += 1
data = self.strings[index]
data = data.decode(self.codec)
return data
class xtmsi(xtdoc):
"""
Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains
parsed MSI table information, similar to the output of the Orca tool. Binary streams are placed in a
    virtual folder called "Binary", and scripts from custom actions are extracted separately into
    a virtual folder named "Action".
"""
_SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'
_CUSTOM_ACTION_TYPES = {(1):
'DLL file stored in a Binary table stream.', (2):
'EXE file stored in a Binary table stream.', (5):
'JScript file stored in a Binary table stream.', (6):
'VBScript file stored in a Binary table stream.', (17):
'DLL file that is installed with a product.', (18):
'EXE file that is installed with a product.', (19):
'Displays a specified error message and returns failure, terminating the installation.'
, (21): 'JScript file that is installed with a product.', (22):
'VBScript file that is installed with a product.', (34):
'EXE file having a path referencing a directory.', (35):
'Directory set with formatted text.', (37):
'JScript text stored in this sequence table.', (38):
'VBScript text stored in this sequence table.', (50):
'EXE file having a path specified by a property value.', (51):
'Property set with formatted text.', (53):
'JScript text specified by a property value.', (54):
'VBScript text specified by a property value.'}
def unpack(self, data):
streams = {result.path: result for result in super().unpack(data)}
def stream(name: str):
return streams.pop(name).get_data()
def column_formats(table: Dict[str, MSITableColumnInfo]) ->str:
return ''.join(v.struct_format for v in table.values())
def stream_to_rows(data: ByteStr, row_format: str):
row_size = struct.calcsize(f'<{row_format}')
row_count = int(len(data) / row_size)
reader = StructReader(data)
columns = [reader.read_struct(f'<{sc * row_count}') for sc in
row_format]
for i in range(row_count):
yield [c[i] for c in columns]
tables: Dict[str, Dict[str, MSITableColumnInfo]
] = collections.defaultdict(collections.OrderedDict)
strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))
for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(
stream('!_Columns'), 'HHHH'):
tbl_name = strings.ref(tbl_name_id)
col_name = strings.ref(col_name_id)
tables[tbl_name][col_name] = MSITableColumnInfo(col_number,
col_attributes)
table_names_given = {strings.ref(k) for k in chunks.unpack(stream(
'!_Tables'), 2, False)}
table_names_known = set(tables)
for name in (table_names_known - table_names_given):
self.log_warn(f'table name known but not given: {name}')
for name in (table_names_given - table_names_known):
self.log_warn(f'table name given but not known: {name}')
class ScriptItem(NamedTuple):
row_index: int
extension: Optional[str]
processed_table_data: Dict[str, List[Dict[str, str]]] = {}
tbl_properties: Dict[str, str] = {}
tbl_files: Dict[str, str] = {}
tbl_components: Dict[str, str] = {}
postprocessing: List[ScriptItem] = []
def format_string(string: str):
def _replace(match: re.Match[str]):
_replace.done = False
prefix, name = match.groups()
if not prefix:
tbl = tbl_properties
elif prefix in '%':
name = name.rstrip('%').upper()
return f'%{name}%'
elif prefix in '!#':
tbl = tbl_files
elif prefix in '$':
tbl = tbl_components
else:
raise ValueError
return tbl.get(name, '')
while True:
_replace.done = True
string = re.sub(
"""(?x)
                \\[ # open square bracket
(?![~\\\\]) # not followed by escapes
([%$!#]?) # any of the valid prefix characters
([^[\\]{}]+) # no brackets or braces
\\]"""
, _replace, string)
if _replace.done:
break
string = re.sub('\\[\\\\(.)\\]', '\\1', string)
string = string.replace('[~]', '\x00')
return string
for table_name, table in tables.items():
stream_name = f'!{table_name}'
if stream_name not in streams:
continue
processed = []
info = list(table.values())
for r, row in enumerate(stream_to_rows(stream(stream_name),
column_formats(table))):
values = []
for index, value in enumerate(row):
vt = info[index].type
if vt is MsiType.Long:
if value != 0:
value -= 2147483648
elif vt is MsiType.Short:
if value != 0:
value -= 32768
elif value in strings:
value = strings.ref(value)
elif not info[index].is_integer:
value = ''
values.append(value)
if table_name == 'Property':
tbl_properties[values[0]] = values[1]
                if table_name == 'File':
                    tbl_files[values[0]] = values[2]
                if table_name == 'Component':
                    tbl_components[values[0]] = f'%{values[2]}%'
entry = dict(zip(table, values))
einfo = {t: i for t, i in zip(table, info)}
if table_name == 'MsiFileHash':
entry['Hash'] = struct.pack('<IIII', row[2] ^
2147483648, row[3] ^ 2147483648, row[4] ^
2147483648, row[5] ^ 2147483648).hex()
if table_name == 'CustomAction':
code = row[1] & 63
try:
entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]
except LookupError:
pass
t = einfo.get('Target')
c = {(37): 'js', (38): 'vbs', (51): None}
if code in c and t and not t.is_integer:
postprocessing.append(ScriptItem(r, c[code]))
processed.append(entry)
if processed:
processed_table_data[table_name] = processed
ca = processed_table_data.get('CustomAction', None)
for item in postprocessing:
entry = ca[item.row_index]
try:
path: str = entry['Action']
data: str = entry['Target']
except KeyError:
continue
root = f'Action/{path}'
if item.extension:
path = f'{root}.{item.extension}'
streams[path] = UnpackResult(path, data.encode(self.codec))
continue
data = format_string(data)
parts = [part.partition('\x02') for part in data.split('\x01')]
if not all(part[1] == '\x02' for part in parts):
continue
for name, _, script in parts:
if not name.lower().startswith('script'):
continue
if not script:
continue
path = f'{root}.{name}'
streams[path] = UnpackResult(path, script.encode(self.codec))
for ignored_stream in ['[5]SummaryInformation',
'[5]DocumentSummaryInformation', '[5]DigitalSignature',
'[5]MsiDigitalSignatureEx']:
streams.pop(ignored_stream, None)
inconsistencies = 0
for k in range(len(strings)):
c = strings.computed_ref_count[k]
p = strings.provided_ref_count[k]
if c != p and not self.log_debug(
f'string reference count computed={c} provided={p}:',
strings.ref(k + 1, False)):
inconsistencies += 1
if inconsistencies:
self.log_info(
f'found {inconsistencies} incorrect string reference counts')
def fix_msi_path(path: str):
prefix, dot, name = path.partition('.')
if dot == '.' and prefix.lower() == 'binary':
path = f'{prefix}/{name}'
return path
streams = {fix_msi_path(path): item for path, item in streams.items()}
ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME, json.dumps(
processed_table_data, indent=4).encode(self.codec))
streams[ds.path] = ds
for path in sorted(streams):
streams[path].path = path
yield streams[path]
@classmethod
def handles(self, data: bytearray):
if not data.startswith(b'\xd0\xcf\x11\xe0'):
return False
return FileMagicInfo(data).extension == 'msi'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from __future__ import annotations
from typing import List, Dict, NamedTuple, Union, Optional
import codecs
import collections
import enum
import json
import re
import struct
from refinery.lib.structures import StructReader
from refinery.units.formats.office.xtdoc import xtdoc, UnpackResult
from refinery.lib import chunks
from refinery.lib.types import ByteStr
from refinery.lib.mime import FileMagicInfo
from refinery.lib.tools import cached_property
class MsiType(enum.IntEnum):
"""
Known data types for MSI table cell entries.
"""
Long = 260
Short = 1282
Binary = 2304
String = 3328
StringLocalized = 3840
Unknown = 0
def __str__(self):
return self.name
class MSITableColumnInfo(NamedTuple):
"""
Represents information about an MSI table column. See also:
https://doxygen.reactos.org/db/de4/msipriv_8h.html
"""
number: int
attributes: int
@property
def type(self) ->MsiType:
try:
if self.is_integer:
return MsiType(self.attributes & 4095)
else:
return MsiType(self.attributes & 3840)
except Exception:
return MsiType.Unknown
@property
def is_integer(self) ->bool:
return self.attributes & 3840 < 2048
@property
def is_key(self) ->bool:
return self.attributes & 8192 == 8192
@property
def is_nullable(self) ->bool:
return self.attributes & 4096 == 4096
@property
def length(self) ->int:
vt = self.type
if vt is MsiType.Long:
return 4
if vt is MsiType.Short:
return 2
return self.attributes & 255
@property
def struct_format(self) ->str:
vt = self.type
if vt is MsiType.Long:
return 'I'
elif vt is MsiType.Short:
return 'H'
else:
return 'H'
class MSIStringData:
def __init__(self, string_data: ByteStr, string_pool: ByteStr):
data = StructReader(string_data)
pool = StructReader(string_pool)
self.strings: List[bytes] = []
self.provided_ref_count: List[int] = []
self.computed_ref_count: List[int] = []
self.codepage = pool.u16()
self._unknown = pool.u16()
while not pool.eof:
size, rc = pool.read_struct('<HH')
string = data.read_bytes(size)
self.strings.append(string)
self.provided_ref_count.append(rc)
self.computed_ref_count.append(0)
@cached_property
def codec(self):
try:
return codecs.lookup(f'cp{self.codepage}').name
except Exception:
xtmsi.log_info('failed looking up codec', self.codepage)
return 'latin1'
def __len__(self):
return len(self.strings)
def __iter__(self):
yield from range(1, len(self) + 1)
def __contains__(self, index):
return 0 < index <= len(self)
def ref(self, index: int, increment=True) ->Union[str, bytes]:
assert index > 0
index -= 1
if increment:
self.computed_ref_count[index] += 1
data = self.strings[index]
data = data.decode(self.codec)
return data
class xtmsi(xtdoc):
"""
Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains
parsed MSI table information, similar to the output of the Orca tool. Binary streams are placed in a
    virtual folder called "Binary", and scripts from custom actions are extracted separately into
    a virtual folder named "Action".
"""
_SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'
_CUSTOM_ACTION_TYPES = {(1):
'DLL file stored in a Binary table stream.', (2):
'EXE file stored in a Binary table stream.', (5):
'JScript file stored in a Binary table stream.', (6):
'VBScript file stored in a Binary table stream.', (17):
'DLL file that is installed with a product.', (18):
'EXE file that is installed with a product.', (19):
'Displays a specified error message and returns failure, terminating the installation.'
, (21): 'JScript file that is installed with a product.', (22):
'VBScript file that is installed with a product.', (34):
'EXE file having a path referencing a directory.', (35):
'Directory set with formatted text.', (37):
'JScript text stored in this sequence table.', (38):
'VBScript text stored in this sequence table.', (50):
'EXE file having a path specified by a property value.', (51):
'Property set with formatted text.', (53):
'JScript text specified by a property value.', (54):
'VBScript text specified by a property value.'}
def unpack(self, data):
streams = {result.path: result for result in super().unpack(data)}
def stream(name: str):
return streams.pop(name).get_data()
def column_formats(table: Dict[str, MSITableColumnInfo]) ->str:
return ''.join(v.struct_format for v in table.values())
def stream_to_rows(data: ByteStr, row_format: str):
row_size = struct.calcsize(f'<{row_format}')
row_count = int(len(data) / row_size)
reader = StructReader(data)
columns = [reader.read_struct(f'<{sc * row_count}') for sc in
row_format]
for i in range(row_count):
yield [c[i] for c in columns]
tables: Dict[str, Dict[str, MSITableColumnInfo]
] = collections.defaultdict(collections.OrderedDict)
strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))
for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(
stream('!_Columns'), 'HHHH'):
tbl_name = strings.ref(tbl_name_id)
col_name = strings.ref(col_name_id)
tables[tbl_name][col_name] = MSITableColumnInfo(col_number,
col_attributes)
table_names_given = {strings.ref(k) for k in chunks.unpack(stream(
'!_Tables'), 2, False)}
table_names_known = set(tables)
for name in (table_names_known - table_names_given):
self.log_warn(f'table name known but not given: {name}')
for name in (table_names_given - table_names_known):
self.log_warn(f'table name given but not known: {name}')
class ScriptItem(NamedTuple):
row_index: int
extension: Optional[str]
processed_table_data: Dict[str, List[Dict[str, str]]] = {}
tbl_properties: Dict[str, str] = {}
tbl_files: Dict[str, str] = {}
tbl_components: Dict[str, str] = {}
postprocessing: List[ScriptItem] = []
def format_string(string: str):
def _replace(match: re.Match[str]):
_replace.done = False
prefix, name = match.groups()
if not prefix:
tbl = tbl_properties
elif prefix in '%':
name = name.rstrip('%').upper()
return f'%{name}%'
elif prefix in '!#':
tbl = tbl_files
elif prefix in '$':
tbl = tbl_components
else:
raise ValueError
return tbl.get(name, '')
while True:
_replace.done = True
string = re.sub(
"""(?x)
                \\[ # open square bracket
(?![~\\\\]) # not followed by escapes
([%$!#]?) # any of the valid prefix characters
([^[\\]{}]+) # no brackets or braces
\\]"""
, _replace, string)
if _replace.done:
break
string = re.sub('\\[\\\\(.)\\]', '\\1', string)
string = string.replace('[~]', '\x00')
return string
for table_name, table in tables.items():
stream_name = f'!{table_name}'
if stream_name not in streams:
continue
processed = []
info = list(table.values())
for r, row in enumerate(stream_to_rows(stream(stream_name),
column_formats(table))):
values = []
for index, value in enumerate(row):
vt = info[index].type
if vt is MsiType.Long:
if value != 0:
value -= 2147483648
elif vt is MsiType.Short:
if value != 0:
value -= 32768
elif value in strings:
value = strings.ref(value)
elif not info[index].is_integer:
value = ''
values.append(value)
if table_name == 'Property':
tbl_properties[values[0]] = values[1]
                if table_name == 'File':
                    tbl_files[values[0]] = values[2]
                if table_name == 'Component':
                    tbl_components[values[0]] = f'%{values[2]}%'
entry = dict(zip(table, values))
einfo = {t: i for t, i in zip(table, info)}
if table_name == 'MsiFileHash':
entry['Hash'] = struct.pack('<IIII', row[2] ^
2147483648, row[3] ^ 2147483648, row[4] ^
2147483648, row[5] ^ 2147483648).hex()
if table_name == 'CustomAction':
code = row[1] & 63
try:
entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]
except LookupError:
pass
t = einfo.get('Target')
c = {(37): 'js', (38): 'vbs', (51): None}
if code in c and t and not t.is_integer:
postprocessing.append(ScriptItem(r, c[code]))
processed.append(entry)
if processed:
processed_table_data[table_name] = processed
ca = processed_table_data.get('CustomAction', None)
for item in postprocessing:
entry = ca[item.row_index]
try:
path: str = entry['Action']
data: str = entry['Target']
except KeyError:
continue
root = f'Action/{path}'
if item.extension:
path = f'{root}.{item.extension}'
streams[path] = UnpackResult(path, data.encode(self.codec))
continue
data = format_string(data)
parts = [part.partition('\x02') for part in data.split('\x01')]
if not all(part[1] == '\x02' for part in parts):
continue
for name, _, script in parts:
if not name.lower().startswith('script'):
continue
if not script:
continue
path = f'{root}.{name}'
streams[path] = UnpackResult(path, script.encode(self.codec))
for ignored_stream in ['[5]SummaryInformation',
'[5]DocumentSummaryInformation', '[5]DigitalSignature',
'[5]MsiDigitalSignatureEx']:
streams.pop(ignored_stream, None)
inconsistencies = 0
for k in range(len(strings)):
c = strings.computed_ref_count[k]
p = strings.provided_ref_count[k]
if c != p and not self.log_debug(
f'string reference count computed={c} provided={p}:',
strings.ref(k + 1, False)):
inconsistencies += 1
if inconsistencies:
self.log_info(
f'found {inconsistencies} incorrect string reference counts')
def fix_msi_path(path: str):
prefix, dot, name = path.partition('.')
if dot == '.' and prefix.lower() == 'binary':
path = f'{prefix}/{name}'
return path
streams = {fix_msi_path(path): item for path, item in streams.items()}
ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME, json.dumps(
processed_table_data, indent=4).encode(self.codec))
streams[ds.path] = ds
for path in sorted(streams):
streams[path].path = path
yield streams[path]
@classmethod
def handles(self, data: bytearray):
if not data.startswith(b'\xd0\xcf\x11\xe0'):
return False
return FileMagicInfo(data).extension == 'msi'
xtmsi.__doc__ = xtmsi.__doc__.format(FN=xtmsi._SYNTHETIC_STREAMS_FILENAME)
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
from typing import List, Dict, NamedTuple, Union, Optional
import codecs
import collections
import enum
import json
import re
import struct
from refinery.lib.structures import StructReader
from refinery.units.formats.office.xtdoc import xtdoc, UnpackResult
from refinery.lib import chunks
from refinery.lib.types import ByteStr
from refinery.lib.mime import FileMagicInfo
from refinery.lib.tools import cached_property
class MsiType(enum.IntEnum):
"""
Known data types for MSI table cell entries.
"""
Long = 0x104
Short = 0x502
Binary = 0x900
String = 0xD00
StringLocalized = 0xF00
Unknown = 0
def __str__(self):
return self.name
class MSITableColumnInfo(NamedTuple):
"""
Represents information about an MSI table column. See also:
https://doxygen.reactos.org/db/de4/msipriv_8h.html
"""
number: int
attributes: int
@property
def type(self) -> MsiType:
try:
if self.is_integer:
return MsiType(self.attributes & 0xFFF)
else:
return MsiType(self.attributes & 0xF00)
except Exception:
return MsiType.Unknown
@property
def is_integer(self) -> bool:
return self.attributes & 0x0F00 < 0x800
@property
def is_key(self) -> bool:
return self.attributes & 0x2000 == 0x2000
@property
def is_nullable(self) -> bool:
return self.attributes & 0x1000 == 0x1000
@property
def length(self) -> int:
vt = self.type
if vt is MsiType.Long:
return 4
if vt is MsiType.Short:
return 2
return self.attributes & 0xFF
@property
def struct_format(self) -> str:
vt = self.type
if vt is MsiType.Long:
return 'I'
elif vt is MsiType.Short:
return 'H'
else:
return 'H'
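# Example (illustrative, not part of the original source): a column whose raw
# attribute word is 0x2104 decodes as a key column holding 4-byte integers:
#
#   col = MSITableColumnInfo(number=1, attributes=0x2104)
#   col.type           # MsiType.Long
#   col.is_integer     # True
#   col.is_key         # True
#   col.struct_format  # 'I'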
class MSIStringData:
def __init__(self, string_data: ByteStr, string_pool: ByteStr):
data = StructReader(string_data)
pool = StructReader(string_pool)
self.strings: List[bytes] = []
self.provided_ref_count: List[int] = []
self.computed_ref_count: List[int] = []
self.codepage = pool.u16()
self._unknown = pool.u16()
while not pool.eof:
size, rc = pool.read_struct('<HH')
string = data.read_bytes(size)
self.strings.append(string)
self.provided_ref_count.append(rc)
self.computed_ref_count.append(0)
@cached_property
def codec(self):
try:
return codecs.lookup(F'cp{self.codepage}').name
except Exception:
xtmsi.log_info('failed looking up codec', self.codepage)
return 'latin1'
def __len__(self):
return len(self.strings)
def __iter__(self):
yield from range(1, len(self) + 1)
def __contains__(self, index):
return 0 < index <= len(self)
def ref(self, index: int, increment=True) -> Union[str, bytes]:
assert index > 0
index -= 1
if increment:
self.computed_ref_count[index] += 1
data = self.strings[index]
data = data.decode(self.codec)
return data
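# Note (illustrative): string references in MSI tables are 1-based, so ref(1)
# returns the first entry of the string pool. Each successful ref() also bumps
# computed_ref_count, which unpack() later compares against the reference
# counts stored in the !_StringPool stream.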
class xtmsi(xtdoc):
"""
Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains
parsed MSI table information, similar to the output of the Orca tool. Binary streams are placed in a
    virtual folder called "Binary", and scripts from custom actions are extracted separately into
    a virtual folder named "Action".
"""
_SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'
# https://learn.microsoft.com/en-us/windows/win32/msi/summary-list-of-all-custom-action-types
_CUSTOM_ACTION_TYPES = {
0x01: 'DLL file stored in a Binary table stream.',
0x02: 'EXE file stored in a Binary table stream.',
0x05: 'JScript file stored in a Binary table stream.',
0x06: 'VBScript file stored in a Binary table stream.',
0x11: 'DLL file that is installed with a product.',
0x12: 'EXE file that is installed with a product.',
0x13: 'Displays a specified error message and returns failure, terminating the installation.',
0x15: 'JScript file that is installed with a product.',
0x16: 'VBScript file that is installed with a product.',
0x22: 'EXE file having a path referencing a directory.',
0x23: 'Directory set with formatted text.',
0x25: 'JScript text stored in this sequence table.',
0x26: 'VBScript text stored in this sequence table.',
0x32: 'EXE file having a path specified by a property value.',
0x33: 'Property set with formatted text.',
0x35: 'JScript text specified by a property value.',
0x36: 'VBScript text specified by a property value.',
}
def unpack(self, data):
streams = {result.path: result for result in super().unpack(data)}
def stream(name: str):
return streams.pop(name).get_data()
def column_formats(table: Dict[str, MSITableColumnInfo]) -> str:
return ''.join(v.struct_format for v in table.values())
def stream_to_rows(data: ByteStr, row_format: str):
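            # MSI table streams are laid out column-major: all cells of the
            # first column are stored back to back, then all cells of the
            # second column, and so on. Each column is therefore read as one
            # contiguous struct below and the result is transposed into rows.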
row_size = struct.calcsize(F'<{row_format}')
row_count = int(len(data) / row_size)
reader = StructReader(data)
columns = [reader.read_struct(F'<{sc*row_count}') for sc in row_format]
for i in range(row_count):
yield [c[i] for c in columns]
tables: Dict[str, Dict[str, MSITableColumnInfo]] = collections.defaultdict(collections.OrderedDict)
strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))
for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(stream('!_Columns'), 'HHHH'):
tbl_name = strings.ref(tbl_name_id)
col_name = strings.ref(col_name_id)
tables[tbl_name][col_name] = MSITableColumnInfo(col_number, col_attributes)
table_names_given = {strings.ref(k) for k in chunks.unpack(stream('!_Tables'), 2, False)}
table_names_known = set(tables)
for name in table_names_known - table_names_given:
self.log_warn(F'table name known but not given: {name}')
for name in table_names_given - table_names_known:
self.log_warn(F'table name given but not known: {name}')
class ScriptItem(NamedTuple):
row_index: int
extension: Optional[str]
processed_table_data: Dict[str, List[Dict[str, str]]] = {}
tbl_properties: Dict[str, str] = {}
tbl_files: Dict[str, str] = {}
tbl_components: Dict[str, str] = {}
postprocessing: List[ScriptItem] = []
def format_string(string: str):
# https://learn.microsoft.com/en-us/windows/win32/msi/formatted
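            # Resolves the MSI "Formatted" syntax used in CustomAction targets:
            # [Name] is looked up in the Property table, [#Key] and [!Key] in
            # the File table, [$Key] in the Component table, and [%VAR] becomes
            # an environment variable reference; [\x] escapes a literal
            # character and [~] stands for an embedded null byte.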
def _replace(match: re.Match[str]):
_replace.done = False
prefix, name = match.groups()
if not prefix:
tbl = tbl_properties
elif prefix in '%':
name = name.rstrip('%').upper()
return F'%{name}%'
elif prefix in '!#':
tbl = tbl_files
elif prefix in '$':
tbl = tbl_components
else:
raise ValueError
return tbl.get(name, '')
while True:
_replace.done = True
string = re.sub(R'''(?x)
                \[ # open square bracket
(?![~\\]) # not followed by escapes
([%$!#]?) # any of the valid prefix characters
([^[\]{}]+) # no brackets or braces
\]''', _replace, string)
if _replace.done:
break
string = re.sub(r'\[\\(.)\]', r'\1', string)
string = string.replace('[~]', '\0')
return string
for table_name, table in tables.items():
stream_name = F'!{table_name}'
if stream_name not in streams:
continue
processed = []
info = list(table.values())
for r, row in enumerate(stream_to_rows(stream(stream_name), column_formats(table))):
values = []
for index, value in enumerate(row):
vt = info[index].type
if vt is MsiType.Long:
if value != 0:
value -= 0x80000000
elif vt is MsiType.Short:
if value != 0:
value -= 0x8000
elif value in strings:
value = strings.ref(value)
elif not info[index].is_integer:
value = ''
values.append(value)
if table_name == 'Property':
tbl_properties[values[0]] = values[1]
                if table_name == 'File':
                    tbl_files[values[0]] = values[2]
                if table_name == 'Component':
                    tbl_components[values[0]] = F'%{values[2]}%'
entry = dict(zip(table, values))
einfo = {t: i for t, i in zip(table, info)}
if table_name == 'MsiFileHash':
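                    # The four 32-bit parts of the 128-bit file hash are stored
                    # with the MSI integer bias (top bit flipped); XOR-ing each
                    # part with 0x80000000 removes that bias before the hash is
                    # packed and hex-encoded.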
entry['Hash'] = struct.pack(
'<IIII',
row[2] ^ 0x80000000,
row[3] ^ 0x80000000,
row[4] ^ 0x80000000,
row[5] ^ 0x80000000,
).hex()
if table_name == 'CustomAction':
code = row[1] & 0x3F
try:
entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]
except LookupError:
pass
t = einfo.get('Target')
c = {0x25: 'js', 0x26: 'vbs', 0x33: None}
if code in c and t and not t.is_integer:
postprocessing.append(ScriptItem(r, c[code]))
processed.append(entry)
if processed:
processed_table_data[table_name] = processed
ca = processed_table_data.get('CustomAction', None)
for item in postprocessing:
entry = ca[item.row_index]
try:
path: str = entry['Action']
data: str = entry['Target']
except KeyError:
continue
root = F'Action/{path}'
if item.extension:
path = F'{root}.{item.extension}'
streams[path] = UnpackResult(path, data.encode(self.codec))
continue
data = format_string(data)
parts = [part.partition('\x02') for part in data.split('\x01')]
if not all(part[1] == '\x02' for part in parts):
continue
for name, _, script in parts:
if not name.lower().startswith('script'):
continue
if not script:
continue
path = F'{root}.{name}'
streams[path] = UnpackResult(path, script.encode(self.codec))
for ignored_stream in [
'[5]SummaryInformation',
'[5]DocumentSummaryInformation',
'[5]DigitalSignature',
'[5]MsiDigitalSignatureEx'
]:
streams.pop(ignored_stream, None)
inconsistencies = 0
for k in range(len(strings)):
c = strings.computed_ref_count[k]
p = strings.provided_ref_count[k]
if c != p and not self.log_debug(F'string reference count computed={c} provided={p}:', strings.ref(k + 1, False)):
inconsistencies += 1
if inconsistencies:
self.log_info(F'found {inconsistencies} incorrect string reference counts')
def fix_msi_path(path: str):
prefix, dot, name = path.partition('.')
if dot == '.' and prefix.lower() == 'binary':
path = F'{prefix}/{name}'
return path
streams = {fix_msi_path(path): item for path, item in streams.items()}
ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME,
json.dumps(processed_table_data, indent=4).encode(self.codec))
streams[ds.path] = ds
for path in sorted(streams):
streams[path].path = path
yield streams[path]
@classmethod
def handles(self, data: bytearray):
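        # D0 CF 11 E0 is the magic of an OLE/Compound File Binary container;
        # MSI packages are CFB documents, so the extension check below narrows
        # generic OLE matches down to actual MSI files.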
if not data.startswith(B'\xD0\xCF\x11\xE0'):
return False
return FileMagicInfo(data).extension == 'msi'
xtmsi.__doc__ = xtmsi.__doc__.format(FN=xtmsi._SYNTHETIC_STREAMS_FILENAME)
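# Usage sketch (illustrative only; the exact unit and meta-variable syntax
# depends on the installed binary refinery version):
#
#   emit sample.msi | xtmsi | dump {path}      # extract every stream to disk
#   emit sample.msi | xtmsi MsiTables.json     # emit only the synthetic table dump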
|
flexible
|
{
"blob_id": "566dab589cdb04332a92138b1a1faf53cd0f58b8",
"index": 5419,
"step-1": "<mask token>\n\n\nclass MSITableColumnInfo(NamedTuple):\n <mask token>\n number: int\n attributes: int\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def length(self) ->int:\n vt = self.type\n if vt is MsiType.Long:\n return 4\n if vt is MsiType.Short:\n return 2\n return self.attributes & 255\n <mask token>\n\n\nclass MSIStringData:\n\n def __init__(self, string_data: ByteStr, string_pool: ByteStr):\n data = StructReader(string_data)\n pool = StructReader(string_pool)\n self.strings: List[bytes] = []\n self.provided_ref_count: List[int] = []\n self.computed_ref_count: List[int] = []\n self.codepage = pool.u16()\n self._unknown = pool.u16()\n while not pool.eof:\n size, rc = pool.read_struct('<HH')\n string = data.read_bytes(size)\n self.strings.append(string)\n self.provided_ref_count.append(rc)\n self.computed_ref_count.append(0)\n\n @cached_property\n def codec(self):\n try:\n return codecs.lookup(f'cp{self.codepage}').name\n except Exception:\n xtmsi.log_info('failed looking up codec', self.codepage)\n return 'latin1'\n\n def __len__(self):\n return len(self.strings)\n\n def __iter__(self):\n yield from range(1, len(self) + 1)\n\n def __contains__(self, index):\n return 0 < index <= len(self)\n\n def ref(self, index: int, increment=True) ->Union[str, bytes]:\n assert index > 0\n index -= 1\n if increment:\n self.computed_ref_count[index] += 1\n data = self.strings[index]\n data = data.decode(self.codec)\n return data\n\n\nclass xtmsi(xtdoc):\n \"\"\"\n Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains\n parsed MSI table information, similar to the output of the Orca tool. Binary streams are placed in a\n virtual folder called \"Binary\", and extracted scripts from custom actions are separately extracted in\n a virtual folder named \"Action\".\n \"\"\"\n _SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'\n _CUSTOM_ACTION_TYPES = {(1):\n 'DLL file stored in a Binary table stream.', (2):\n 'EXE file stored in a Binary table stream.', (5):\n 'JScript file stored in a Binary table stream.', (6):\n 'VBScript file stored in a Binary table stream.', (17):\n 'DLL file that is installed with a product.', (18):\n 'EXE file that is installed with a product.', (19):\n 'Displays a specified error message and returns failure, terminating the installation.'\n , (21): 'JScript file that is installed with a product.', (22):\n 'VBScript file that is installed with a product.', (34):\n 'EXE file having a path referencing a directory.', (35):\n 'Directory set with formatted text.', (37):\n 'JScript text stored in this sequence table.', (38):\n 'VBScript text stored in this sequence table.', (50):\n 'EXE file having a path specified by a property value.', (51):\n 'Property set with formatted text.', (53):\n 'JScript text specified by a property value.', (54):\n 'VBScript text specified by a property value.'}\n\n def unpack(self, data):\n streams = {result.path: result for result in super().unpack(data)}\n\n def stream(name: str):\n return streams.pop(name).get_data()\n\n def column_formats(table: Dict[str, MSITableColumnInfo]) ->str:\n return ''.join(v.struct_format for v in table.values())\n\n def stream_to_rows(data: ByteStr, row_format: str):\n row_size = struct.calcsize(f'<{row_format}')\n row_count = int(len(data) / row_size)\n reader = StructReader(data)\n columns = [reader.read_struct(f'<{sc * row_count}') for sc in\n row_format]\n for i in range(row_count):\n yield [c[i] for c in columns]\n tables: Dict[str, 
Dict[str, MSITableColumnInfo]\n ] = collections.defaultdict(collections.OrderedDict)\n strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))\n for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(\n stream('!_Columns'), 'HHHH'):\n tbl_name = strings.ref(tbl_name_id)\n col_name = strings.ref(col_name_id)\n tables[tbl_name][col_name] = MSITableColumnInfo(col_number,\n col_attributes)\n table_names_given = {strings.ref(k) for k in chunks.unpack(stream(\n '!_Tables'), 2, False)}\n table_names_known = set(tables)\n for name in (table_names_known - table_names_given):\n self.log_warn(f'table name known but not given: {name}')\n for name in (table_names_given - table_names_known):\n self.log_warn(f'table name given but not known: {name}')\n\n\n class ScriptItem(NamedTuple):\n row_index: int\n extension: Optional[str]\n processed_table_data: Dict[str, List[Dict[str, str]]] = {}\n tbl_properties: Dict[str, str] = {}\n tbl_files: Dict[str, str] = {}\n tbl_components: Dict[str, str] = {}\n postprocessing: List[ScriptItem] = []\n\n def format_string(string: str):\n\n def _replace(match: re.Match[str]):\n _replace.done = False\n prefix, name = match.groups()\n if not prefix:\n tbl = tbl_properties\n elif prefix in '%':\n name = name.rstrip('%').upper()\n return f'%{name}%'\n elif prefix in '!#':\n tbl = tbl_files\n elif prefix in '$':\n tbl = tbl_components\n else:\n raise ValueError\n return tbl.get(name, '')\n while True:\n _replace.done = True\n string = re.sub(\n \"\"\"(?x)\n \\\\[ # open square brackent\n (?![~\\\\\\\\]) # not followed by escapes\n ([%$!#]?) # any of the valid prefix characters\n ([^[\\\\]{}]+) # no brackets or braces\n \\\\]\"\"\"\n , _replace, string)\n if _replace.done:\n break\n string = re.sub('\\\\[\\\\\\\\(.)\\\\]', '\\\\1', string)\n string = string.replace('[~]', '\\x00')\n return string\n for table_name, table in tables.items():\n stream_name = f'!{table_name}'\n if stream_name not in streams:\n continue\n processed = []\n info = list(table.values())\n for r, row in enumerate(stream_to_rows(stream(stream_name),\n column_formats(table))):\n values = []\n for index, value in enumerate(row):\n vt = info[index].type\n if vt is MsiType.Long:\n if value != 0:\n value -= 2147483648\n elif vt is MsiType.Short:\n if value != 0:\n value -= 32768\n elif value in strings:\n value = strings.ref(value)\n elif not info[index].is_integer:\n value = ''\n values.append(value)\n if table_name == 'Property':\n tbl_properties[values[0]] = values[1]\n if table_name == 'File':\n tbl_properties[values[0]] = values[2]\n if table_name == 'Component':\n tbl_properties[values[0]] = f'%{values[2]}%'\n entry = dict(zip(table, values))\n einfo = {t: i for t, i in zip(table, info)}\n if table_name == 'MsiFileHash':\n entry['Hash'] = struct.pack('<IIII', row[2] ^ \n 2147483648, row[3] ^ 2147483648, row[4] ^ \n 2147483648, row[5] ^ 2147483648).hex()\n if table_name == 'CustomAction':\n code = row[1] & 63\n try:\n entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]\n except LookupError:\n pass\n t = einfo.get('Target')\n c = {(37): 'js', (38): 'vbs', (51): None}\n if code in c and t and not t.is_integer:\n postprocessing.append(ScriptItem(r, c[code]))\n processed.append(entry)\n if processed:\n processed_table_data[table_name] = processed\n ca = processed_table_data.get('CustomAction', None)\n for item in postprocessing:\n entry = ca[item.row_index]\n try:\n path: str = entry['Action']\n data: str = entry['Target']\n except KeyError:\n continue\n root = 
f'Action/{path}'\n if item.extension:\n path = f'{root}.{item.extension}'\n streams[path] = UnpackResult(path, data.encode(self.codec))\n continue\n data = format_string(data)\n parts = [part.partition('\\x02') for part in data.split('\\x01')]\n if not all(part[1] == '\\x02' for part in parts):\n continue\n for name, _, script in parts:\n if not name.lower().startswith('script'):\n continue\n if not script:\n continue\n path = f'{root}.{name}'\n streams[path] = UnpackResult(path, script.encode(self.codec))\n for ignored_stream in ['[5]SummaryInformation',\n '[5]DocumentSummaryInformation', '[5]DigitalSignature',\n '[5]MsiDigitalSignatureEx']:\n streams.pop(ignored_stream, None)\n inconsistencies = 0\n for k in range(len(strings)):\n c = strings.computed_ref_count[k]\n p = strings.provided_ref_count[k]\n if c != p and not self.log_debug(\n f'string reference count computed={c} provided={p}:',\n strings.ref(k + 1, False)):\n inconsistencies += 1\n if inconsistencies:\n self.log_info(\n f'found {inconsistencies} incorrect string reference counts')\n\n def fix_msi_path(path: str):\n prefix, dot, name = path.partition('.')\n if dot == '.' and prefix.lower() == 'binary':\n path = f'{prefix}/{name}'\n return path\n streams = {fix_msi_path(path): item for path, item in streams.items()}\n ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME, json.dumps(\n processed_table_data, indent=4).encode(self.codec))\n streams[ds.path] = ds\n for path in sorted(streams):\n streams[path].path = path\n yield streams[path]\n\n @classmethod\n def handles(self, data: bytearray):\n if not data.startswith(b'\\xd0\\xcf\\x11\\xe0'):\n return False\n return FileMagicInfo(data).extension == 'msi'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MsiType(enum.IntEnum):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MSITableColumnInfo(NamedTuple):\n \"\"\"\n Represents information about an MSI table column. See also:\n https://doxygen.reactos.org/db/de4/msipriv_8h.html\n \"\"\"\n number: int\n attributes: int\n\n @property\n def type(self) ->MsiType:\n try:\n if self.is_integer:\n return MsiType(self.attributes & 4095)\n else:\n return MsiType(self.attributes & 3840)\n except Exception:\n return MsiType.Unknown\n\n @property\n def is_integer(self) ->bool:\n return self.attributes & 3840 < 2048\n\n @property\n def is_key(self) ->bool:\n return self.attributes & 8192 == 8192\n\n @property\n def is_nullable(self) ->bool:\n return self.attributes & 4096 == 4096\n\n @property\n def length(self) ->int:\n vt = self.type\n if vt is MsiType.Long:\n return 4\n if vt is MsiType.Short:\n return 2\n return self.attributes & 255\n\n @property\n def struct_format(self) ->str:\n vt = self.type\n if vt is MsiType.Long:\n return 'I'\n elif vt is MsiType.Short:\n return 'H'\n else:\n return 'H'\n\n\nclass MSIStringData:\n\n def __init__(self, string_data: ByteStr, string_pool: ByteStr):\n data = StructReader(string_data)\n pool = StructReader(string_pool)\n self.strings: List[bytes] = []\n self.provided_ref_count: List[int] = []\n self.computed_ref_count: List[int] = []\n self.codepage = pool.u16()\n self._unknown = pool.u16()\n while not pool.eof:\n size, rc = pool.read_struct('<HH')\n string = data.read_bytes(size)\n self.strings.append(string)\n self.provided_ref_count.append(rc)\n self.computed_ref_count.append(0)\n\n @cached_property\n def codec(self):\n try:\n return codecs.lookup(f'cp{self.codepage}').name\n except Exception:\n xtmsi.log_info('failed looking up codec', self.codepage)\n return 'latin1'\n\n def __len__(self):\n return len(self.strings)\n\n def __iter__(self):\n yield from range(1, len(self) + 1)\n\n def __contains__(self, index):\n return 0 < index <= len(self)\n\n def ref(self, index: int, increment=True) ->Union[str, bytes]:\n assert index > 0\n index -= 1\n if increment:\n self.computed_ref_count[index] += 1\n data = self.strings[index]\n data = data.decode(self.codec)\n return data\n\n\nclass xtmsi(xtdoc):\n \"\"\"\n Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains\n parsed MSI table information, similar to the output of the Orca tool. 
Binary streams are placed in a\n virtual folder called \"Binary\", and extracted scripts from custom actions are separately extracted in\n a virtual folder named \"Action\".\n \"\"\"\n _SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'\n _CUSTOM_ACTION_TYPES = {(1):\n 'DLL file stored in a Binary table stream.', (2):\n 'EXE file stored in a Binary table stream.', (5):\n 'JScript file stored in a Binary table stream.', (6):\n 'VBScript file stored in a Binary table stream.', (17):\n 'DLL file that is installed with a product.', (18):\n 'EXE file that is installed with a product.', (19):\n 'Displays a specified error message and returns failure, terminating the installation.'\n , (21): 'JScript file that is installed with a product.', (22):\n 'VBScript file that is installed with a product.', (34):\n 'EXE file having a path referencing a directory.', (35):\n 'Directory set with formatted text.', (37):\n 'JScript text stored in this sequence table.', (38):\n 'VBScript text stored in this sequence table.', (50):\n 'EXE file having a path specified by a property value.', (51):\n 'Property set with formatted text.', (53):\n 'JScript text specified by a property value.', (54):\n 'VBScript text specified by a property value.'}\n\n def unpack(self, data):\n streams = {result.path: result for result in super().unpack(data)}\n\n def stream(name: str):\n return streams.pop(name).get_data()\n\n def column_formats(table: Dict[str, MSITableColumnInfo]) ->str:\n return ''.join(v.struct_format for v in table.values())\n\n def stream_to_rows(data: ByteStr, row_format: str):\n row_size = struct.calcsize(f'<{row_format}')\n row_count = int(len(data) / row_size)\n reader = StructReader(data)\n columns = [reader.read_struct(f'<{sc * row_count}') for sc in\n row_format]\n for i in range(row_count):\n yield [c[i] for c in columns]\n tables: Dict[str, Dict[str, MSITableColumnInfo]\n ] = collections.defaultdict(collections.OrderedDict)\n strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))\n for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(\n stream('!_Columns'), 'HHHH'):\n tbl_name = strings.ref(tbl_name_id)\n col_name = strings.ref(col_name_id)\n tables[tbl_name][col_name] = MSITableColumnInfo(col_number,\n col_attributes)\n table_names_given = {strings.ref(k) for k in chunks.unpack(stream(\n '!_Tables'), 2, False)}\n table_names_known = set(tables)\n for name in (table_names_known - table_names_given):\n self.log_warn(f'table name known but not given: {name}')\n for name in (table_names_given - table_names_known):\n self.log_warn(f'table name given but not known: {name}')\n\n\n class ScriptItem(NamedTuple):\n row_index: int\n extension: Optional[str]\n processed_table_data: Dict[str, List[Dict[str, str]]] = {}\n tbl_properties: Dict[str, str] = {}\n tbl_files: Dict[str, str] = {}\n tbl_components: Dict[str, str] = {}\n postprocessing: List[ScriptItem] = []\n\n def format_string(string: str):\n\n def _replace(match: re.Match[str]):\n _replace.done = False\n prefix, name = match.groups()\n if not prefix:\n tbl = tbl_properties\n elif prefix in '%':\n name = name.rstrip('%').upper()\n return f'%{name}%'\n elif prefix in '!#':\n tbl = tbl_files\n elif prefix in '$':\n tbl = tbl_components\n else:\n raise ValueError\n return tbl.get(name, '')\n while True:\n _replace.done = True\n string = re.sub(\n \"\"\"(?x)\n \\\\[ # open square brackent\n (?![~\\\\\\\\]) # not followed by escapes\n ([%$!#]?) 
# any of the valid prefix characters\n ([^[\\\\]{}]+) # no brackets or braces\n \\\\]\"\"\"\n , _replace, string)\n if _replace.done:\n break\n string = re.sub('\\\\[\\\\\\\\(.)\\\\]', '\\\\1', string)\n string = string.replace('[~]', '\\x00')\n return string\n for table_name, table in tables.items():\n stream_name = f'!{table_name}'\n if stream_name not in streams:\n continue\n processed = []\n info = list(table.values())\n for r, row in enumerate(stream_to_rows(stream(stream_name),\n column_formats(table))):\n values = []\n for index, value in enumerate(row):\n vt = info[index].type\n if vt is MsiType.Long:\n if value != 0:\n value -= 2147483648\n elif vt is MsiType.Short:\n if value != 0:\n value -= 32768\n elif value in strings:\n value = strings.ref(value)\n elif not info[index].is_integer:\n value = ''\n values.append(value)\n if table_name == 'Property':\n tbl_properties[values[0]] = values[1]\n if table_name == 'File':\n tbl_properties[values[0]] = values[2]\n if table_name == 'Component':\n tbl_properties[values[0]] = f'%{values[2]}%'\n entry = dict(zip(table, values))\n einfo = {t: i for t, i in zip(table, info)}\n if table_name == 'MsiFileHash':\n entry['Hash'] = struct.pack('<IIII', row[2] ^ \n 2147483648, row[3] ^ 2147483648, row[4] ^ \n 2147483648, row[5] ^ 2147483648).hex()\n if table_name == 'CustomAction':\n code = row[1] & 63\n try:\n entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]\n except LookupError:\n pass\n t = einfo.get('Target')\n c = {(37): 'js', (38): 'vbs', (51): None}\n if code in c and t and not t.is_integer:\n postprocessing.append(ScriptItem(r, c[code]))\n processed.append(entry)\n if processed:\n processed_table_data[table_name] = processed\n ca = processed_table_data.get('CustomAction', None)\n for item in postprocessing:\n entry = ca[item.row_index]\n try:\n path: str = entry['Action']\n data: str = entry['Target']\n except KeyError:\n continue\n root = f'Action/{path}'\n if item.extension:\n path = f'{root}.{item.extension}'\n streams[path] = UnpackResult(path, data.encode(self.codec))\n continue\n data = format_string(data)\n parts = [part.partition('\\x02') for part in data.split('\\x01')]\n if not all(part[1] == '\\x02' for part in parts):\n continue\n for name, _, script in parts:\n if not name.lower().startswith('script'):\n continue\n if not script:\n continue\n path = f'{root}.{name}'\n streams[path] = UnpackResult(path, script.encode(self.codec))\n for ignored_stream in ['[5]SummaryInformation',\n '[5]DocumentSummaryInformation', '[5]DigitalSignature',\n '[5]MsiDigitalSignatureEx']:\n streams.pop(ignored_stream, None)\n inconsistencies = 0\n for k in range(len(strings)):\n c = strings.computed_ref_count[k]\n p = strings.provided_ref_count[k]\n if c != p and not self.log_debug(\n f'string reference count computed={c} provided={p}:',\n strings.ref(k + 1, False)):\n inconsistencies += 1\n if inconsistencies:\n self.log_info(\n f'found {inconsistencies} incorrect string reference counts')\n\n def fix_msi_path(path: str):\n prefix, dot, name = path.partition('.')\n if dot == '.' 
and prefix.lower() == 'binary':\n path = f'{prefix}/{name}'\n return path\n streams = {fix_msi_path(path): item for path, item in streams.items()}\n ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME, json.dumps(\n processed_table_data, indent=4).encode(self.codec))\n streams[ds.path] = ds\n for path in sorted(streams):\n streams[path].path = path\n yield streams[path]\n\n @classmethod\n def handles(self, data: bytearray):\n if not data.startswith(b'\\xd0\\xcf\\x11\\xe0'):\n return False\n return FileMagicInfo(data).extension == 'msi'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MsiType(enum.IntEnum):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.name\n\n\nclass MSITableColumnInfo(NamedTuple):\n \"\"\"\n Represents information about an MSI table column. See also:\n https://doxygen.reactos.org/db/de4/msipriv_8h.html\n \"\"\"\n number: int\n attributes: int\n\n @property\n def type(self) ->MsiType:\n try:\n if self.is_integer:\n return MsiType(self.attributes & 4095)\n else:\n return MsiType(self.attributes & 3840)\n except Exception:\n return MsiType.Unknown\n\n @property\n def is_integer(self) ->bool:\n return self.attributes & 3840 < 2048\n\n @property\n def is_key(self) ->bool:\n return self.attributes & 8192 == 8192\n\n @property\n def is_nullable(self) ->bool:\n return self.attributes & 4096 == 4096\n\n @property\n def length(self) ->int:\n vt = self.type\n if vt is MsiType.Long:\n return 4\n if vt is MsiType.Short:\n return 2\n return self.attributes & 255\n\n @property\n def struct_format(self) ->str:\n vt = self.type\n if vt is MsiType.Long:\n return 'I'\n elif vt is MsiType.Short:\n return 'H'\n else:\n return 'H'\n\n\nclass MSIStringData:\n\n def __init__(self, string_data: ByteStr, string_pool: ByteStr):\n data = StructReader(string_data)\n pool = StructReader(string_pool)\n self.strings: List[bytes] = []\n self.provided_ref_count: List[int] = []\n self.computed_ref_count: List[int] = []\n self.codepage = pool.u16()\n self._unknown = pool.u16()\n while not pool.eof:\n size, rc = pool.read_struct('<HH')\n string = data.read_bytes(size)\n self.strings.append(string)\n self.provided_ref_count.append(rc)\n self.computed_ref_count.append(0)\n\n @cached_property\n def codec(self):\n try:\n return codecs.lookup(f'cp{self.codepage}').name\n except Exception:\n xtmsi.log_info('failed looking up codec', self.codepage)\n return 'latin1'\n\n def __len__(self):\n return len(self.strings)\n\n def __iter__(self):\n yield from range(1, len(self) + 1)\n\n def __contains__(self, index):\n return 0 < index <= len(self)\n\n def ref(self, index: int, increment=True) ->Union[str, bytes]:\n assert index > 0\n index -= 1\n if increment:\n self.computed_ref_count[index] += 1\n data = self.strings[index]\n data = data.decode(self.codec)\n return data\n\n\nclass xtmsi(xtdoc):\n \"\"\"\n Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains\n parsed MSI table information, similar to the output of the Orca tool. 
Binary streams are placed in a\n virtual folder called \"Binary\", and extracted scripts from custom actions are separately extracted in\n a virtual folder named \"Action\".\n \"\"\"\n _SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'\n _CUSTOM_ACTION_TYPES = {(1):\n 'DLL file stored in a Binary table stream.', (2):\n 'EXE file stored in a Binary table stream.', (5):\n 'JScript file stored in a Binary table stream.', (6):\n 'VBScript file stored in a Binary table stream.', (17):\n 'DLL file that is installed with a product.', (18):\n 'EXE file that is installed with a product.', (19):\n 'Displays a specified error message and returns failure, terminating the installation.'\n , (21): 'JScript file that is installed with a product.', (22):\n 'VBScript file that is installed with a product.', (34):\n 'EXE file having a path referencing a directory.', (35):\n 'Directory set with formatted text.', (37):\n 'JScript text stored in this sequence table.', (38):\n 'VBScript text stored in this sequence table.', (50):\n 'EXE file having a path specified by a property value.', (51):\n 'Property set with formatted text.', (53):\n 'JScript text specified by a property value.', (54):\n 'VBScript text specified by a property value.'}\n\n def unpack(self, data):\n streams = {result.path: result for result in super().unpack(data)}\n\n def stream(name: str):\n return streams.pop(name).get_data()\n\n def column_formats(table: Dict[str, MSITableColumnInfo]) ->str:\n return ''.join(v.struct_format for v in table.values())\n\n def stream_to_rows(data: ByteStr, row_format: str):\n row_size = struct.calcsize(f'<{row_format}')\n row_count = int(len(data) / row_size)\n reader = StructReader(data)\n columns = [reader.read_struct(f'<{sc * row_count}') for sc in\n row_format]\n for i in range(row_count):\n yield [c[i] for c in columns]\n tables: Dict[str, Dict[str, MSITableColumnInfo]\n ] = collections.defaultdict(collections.OrderedDict)\n strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))\n for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(\n stream('!_Columns'), 'HHHH'):\n tbl_name = strings.ref(tbl_name_id)\n col_name = strings.ref(col_name_id)\n tables[tbl_name][col_name] = MSITableColumnInfo(col_number,\n col_attributes)\n table_names_given = {strings.ref(k) for k in chunks.unpack(stream(\n '!_Tables'), 2, False)}\n table_names_known = set(tables)\n for name in (table_names_known - table_names_given):\n self.log_warn(f'table name known but not given: {name}')\n for name in (table_names_given - table_names_known):\n self.log_warn(f'table name given but not known: {name}')\n\n\n class ScriptItem(NamedTuple):\n row_index: int\n extension: Optional[str]\n processed_table_data: Dict[str, List[Dict[str, str]]] = {}\n tbl_properties: Dict[str, str] = {}\n tbl_files: Dict[str, str] = {}\n tbl_components: Dict[str, str] = {}\n postprocessing: List[ScriptItem] = []\n\n def format_string(string: str):\n\n def _replace(match: re.Match[str]):\n _replace.done = False\n prefix, name = match.groups()\n if not prefix:\n tbl = tbl_properties\n elif prefix in '%':\n name = name.rstrip('%').upper()\n return f'%{name}%'\n elif prefix in '!#':\n tbl = tbl_files\n elif prefix in '$':\n tbl = tbl_components\n else:\n raise ValueError\n return tbl.get(name, '')\n while True:\n _replace.done = True\n string = re.sub(\n \"\"\"(?x)\n \\\\[ # open square brackent\n (?![~\\\\\\\\]) # not followed by escapes\n ([%$!#]?) 
# any of the valid prefix characters\n ([^[\\\\]{}]+) # no brackets or braces\n \\\\]\"\"\"\n , _replace, string)\n if _replace.done:\n break\n string = re.sub('\\\\[\\\\\\\\(.)\\\\]', '\\\\1', string)\n string = string.replace('[~]', '\\x00')\n return string\n for table_name, table in tables.items():\n stream_name = f'!{table_name}'\n if stream_name not in streams:\n continue\n processed = []\n info = list(table.values())\n for r, row in enumerate(stream_to_rows(stream(stream_name),\n column_formats(table))):\n values = []\n for index, value in enumerate(row):\n vt = info[index].type\n if vt is MsiType.Long:\n if value != 0:\n value -= 2147483648\n elif vt is MsiType.Short:\n if value != 0:\n value -= 32768\n elif value in strings:\n value = strings.ref(value)\n elif not info[index].is_integer:\n value = ''\n values.append(value)\n if table_name == 'Property':\n tbl_properties[values[0]] = values[1]\n if table_name == 'File':\n tbl_properties[values[0]] = values[2]\n if table_name == 'Component':\n tbl_properties[values[0]] = f'%{values[2]}%'\n entry = dict(zip(table, values))\n einfo = {t: i for t, i in zip(table, info)}\n if table_name == 'MsiFileHash':\n entry['Hash'] = struct.pack('<IIII', row[2] ^ \n 2147483648, row[3] ^ 2147483648, row[4] ^ \n 2147483648, row[5] ^ 2147483648).hex()\n if table_name == 'CustomAction':\n code = row[1] & 63\n try:\n entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]\n except LookupError:\n pass\n t = einfo.get('Target')\n c = {(37): 'js', (38): 'vbs', (51): None}\n if code in c and t and not t.is_integer:\n postprocessing.append(ScriptItem(r, c[code]))\n processed.append(entry)\n if processed:\n processed_table_data[table_name] = processed\n ca = processed_table_data.get('CustomAction', None)\n for item in postprocessing:\n entry = ca[item.row_index]\n try:\n path: str = entry['Action']\n data: str = entry['Target']\n except KeyError:\n continue\n root = f'Action/{path}'\n if item.extension:\n path = f'{root}.{item.extension}'\n streams[path] = UnpackResult(path, data.encode(self.codec))\n continue\n data = format_string(data)\n parts = [part.partition('\\x02') for part in data.split('\\x01')]\n if not all(part[1] == '\\x02' for part in parts):\n continue\n for name, _, script in parts:\n if not name.lower().startswith('script'):\n continue\n if not script:\n continue\n path = f'{root}.{name}'\n streams[path] = UnpackResult(path, script.encode(self.codec))\n for ignored_stream in ['[5]SummaryInformation',\n '[5]DocumentSummaryInformation', '[5]DigitalSignature',\n '[5]MsiDigitalSignatureEx']:\n streams.pop(ignored_stream, None)\n inconsistencies = 0\n for k in range(len(strings)):\n c = strings.computed_ref_count[k]\n p = strings.provided_ref_count[k]\n if c != p and not self.log_debug(\n f'string reference count computed={c} provided={p}:',\n strings.ref(k + 1, False)):\n inconsistencies += 1\n if inconsistencies:\n self.log_info(\n f'found {inconsistencies} incorrect string reference counts')\n\n def fix_msi_path(path: str):\n prefix, dot, name = path.partition('.')\n if dot == '.' 
and prefix.lower() == 'binary':\n path = f'{prefix}/{name}'\n return path\n streams = {fix_msi_path(path): item for path, item in streams.items()}\n ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME, json.dumps(\n processed_table_data, indent=4).encode(self.codec))\n streams[ds.path] = ds\n for path in sorted(streams):\n streams[path].path = path\n yield streams[path]\n\n @classmethod\n def handles(self, data: bytearray):\n if not data.startswith(b'\\xd0\\xcf\\x11\\xe0'):\n return False\n return FileMagicInfo(data).extension == 'msi'\n\n\n<mask token>\n",
"step-4": "from __future__ import annotations\nfrom typing import List, Dict, NamedTuple, Union, Optional\nimport codecs\nimport collections\nimport enum\nimport json\nimport re\nimport struct\nfrom refinery.lib.structures import StructReader\nfrom refinery.units.formats.office.xtdoc import xtdoc, UnpackResult\nfrom refinery.lib import chunks\nfrom refinery.lib.types import ByteStr\nfrom refinery.lib.mime import FileMagicInfo\nfrom refinery.lib.tools import cached_property\n\n\nclass MsiType(enum.IntEnum):\n \"\"\"\n Known data types for MSI table cell entries.\n \"\"\"\n Long = 260\n Short = 1282\n Binary = 2304\n String = 3328\n StringLocalized = 3840\n Unknown = 0\n\n def __str__(self):\n return self.name\n\n\nclass MSITableColumnInfo(NamedTuple):\n \"\"\"\n Represents information about an MSI table column. See also:\n https://doxygen.reactos.org/db/de4/msipriv_8h.html\n \"\"\"\n number: int\n attributes: int\n\n @property\n def type(self) ->MsiType:\n try:\n if self.is_integer:\n return MsiType(self.attributes & 4095)\n else:\n return MsiType(self.attributes & 3840)\n except Exception:\n return MsiType.Unknown\n\n @property\n def is_integer(self) ->bool:\n return self.attributes & 3840 < 2048\n\n @property\n def is_key(self) ->bool:\n return self.attributes & 8192 == 8192\n\n @property\n def is_nullable(self) ->bool:\n return self.attributes & 4096 == 4096\n\n @property\n def length(self) ->int:\n vt = self.type\n if vt is MsiType.Long:\n return 4\n if vt is MsiType.Short:\n return 2\n return self.attributes & 255\n\n @property\n def struct_format(self) ->str:\n vt = self.type\n if vt is MsiType.Long:\n return 'I'\n elif vt is MsiType.Short:\n return 'H'\n else:\n return 'H'\n\n\nclass MSIStringData:\n\n def __init__(self, string_data: ByteStr, string_pool: ByteStr):\n data = StructReader(string_data)\n pool = StructReader(string_pool)\n self.strings: List[bytes] = []\n self.provided_ref_count: List[int] = []\n self.computed_ref_count: List[int] = []\n self.codepage = pool.u16()\n self._unknown = pool.u16()\n while not pool.eof:\n size, rc = pool.read_struct('<HH')\n string = data.read_bytes(size)\n self.strings.append(string)\n self.provided_ref_count.append(rc)\n self.computed_ref_count.append(0)\n\n @cached_property\n def codec(self):\n try:\n return codecs.lookup(f'cp{self.codepage}').name\n except Exception:\n xtmsi.log_info('failed looking up codec', self.codepage)\n return 'latin1'\n\n def __len__(self):\n return len(self.strings)\n\n def __iter__(self):\n yield from range(1, len(self) + 1)\n\n def __contains__(self, index):\n return 0 < index <= len(self)\n\n def ref(self, index: int, increment=True) ->Union[str, bytes]:\n assert index > 0\n index -= 1\n if increment:\n self.computed_ref_count[index] += 1\n data = self.strings[index]\n data = data.decode(self.codec)\n return data\n\n\nclass xtmsi(xtdoc):\n \"\"\"\n Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains\n parsed MSI table information, similar to the output of the Orca tool. 
Binary streams are placed in a\n virtual folder called \"Binary\", and extracted scripts from custom actions are separately extracted in\n a virtual folder named \"Action\".\n \"\"\"\n _SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'\n _CUSTOM_ACTION_TYPES = {(1):\n 'DLL file stored in a Binary table stream.', (2):\n 'EXE file stored in a Binary table stream.', (5):\n 'JScript file stored in a Binary table stream.', (6):\n 'VBScript file stored in a Binary table stream.', (17):\n 'DLL file that is installed with a product.', (18):\n 'EXE file that is installed with a product.', (19):\n 'Displays a specified error message and returns failure, terminating the installation.'\n , (21): 'JScript file that is installed with a product.', (22):\n 'VBScript file that is installed with a product.', (34):\n 'EXE file having a path referencing a directory.', (35):\n 'Directory set with formatted text.', (37):\n 'JScript text stored in this sequence table.', (38):\n 'VBScript text stored in this sequence table.', (50):\n 'EXE file having a path specified by a property value.', (51):\n 'Property set with formatted text.', (53):\n 'JScript text specified by a property value.', (54):\n 'VBScript text specified by a property value.'}\n\n def unpack(self, data):\n streams = {result.path: result for result in super().unpack(data)}\n\n def stream(name: str):\n return streams.pop(name).get_data()\n\n def column_formats(table: Dict[str, MSITableColumnInfo]) ->str:\n return ''.join(v.struct_format for v in table.values())\n\n def stream_to_rows(data: ByteStr, row_format: str):\n row_size = struct.calcsize(f'<{row_format}')\n row_count = int(len(data) / row_size)\n reader = StructReader(data)\n columns = [reader.read_struct(f'<{sc * row_count}') for sc in\n row_format]\n for i in range(row_count):\n yield [c[i] for c in columns]\n tables: Dict[str, Dict[str, MSITableColumnInfo]\n ] = collections.defaultdict(collections.OrderedDict)\n strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))\n for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(\n stream('!_Columns'), 'HHHH'):\n tbl_name = strings.ref(tbl_name_id)\n col_name = strings.ref(col_name_id)\n tables[tbl_name][col_name] = MSITableColumnInfo(col_number,\n col_attributes)\n table_names_given = {strings.ref(k) for k in chunks.unpack(stream(\n '!_Tables'), 2, False)}\n table_names_known = set(tables)\n for name in (table_names_known - table_names_given):\n self.log_warn(f'table name known but not given: {name}')\n for name in (table_names_given - table_names_known):\n self.log_warn(f'table name given but not known: {name}')\n\n\n class ScriptItem(NamedTuple):\n row_index: int\n extension: Optional[str]\n processed_table_data: Dict[str, List[Dict[str, str]]] = {}\n tbl_properties: Dict[str, str] = {}\n tbl_files: Dict[str, str] = {}\n tbl_components: Dict[str, str] = {}\n postprocessing: List[ScriptItem] = []\n\n def format_string(string: str):\n\n def _replace(match: re.Match[str]):\n _replace.done = False\n prefix, name = match.groups()\n if not prefix:\n tbl = tbl_properties\n elif prefix in '%':\n name = name.rstrip('%').upper()\n return f'%{name}%'\n elif prefix in '!#':\n tbl = tbl_files\n elif prefix in '$':\n tbl = tbl_components\n else:\n raise ValueError\n return tbl.get(name, '')\n while True:\n _replace.done = True\n string = re.sub(\n \"\"\"(?x)\n \\\\[ # open square brackent\n (?![~\\\\\\\\]) # not followed by escapes\n ([%$!#]?) 
# any of the valid prefix characters\n ([^[\\\\]{}]+) # no brackets or braces\n \\\\]\"\"\"\n , _replace, string)\n if _replace.done:\n break\n string = re.sub('\\\\[\\\\\\\\(.)\\\\]', '\\\\1', string)\n string = string.replace('[~]', '\\x00')\n return string\n for table_name, table in tables.items():\n stream_name = f'!{table_name}'\n if stream_name not in streams:\n continue\n processed = []\n info = list(table.values())\n for r, row in enumerate(stream_to_rows(stream(stream_name),\n column_formats(table))):\n values = []\n for index, value in enumerate(row):\n vt = info[index].type\n if vt is MsiType.Long:\n if value != 0:\n value -= 2147483648\n elif vt is MsiType.Short:\n if value != 0:\n value -= 32768\n elif value in strings:\n value = strings.ref(value)\n elif not info[index].is_integer:\n value = ''\n values.append(value)\n if table_name == 'Property':\n tbl_properties[values[0]] = values[1]\n if table_name == 'File':\n tbl_properties[values[0]] = values[2]\n if table_name == 'Component':\n tbl_properties[values[0]] = f'%{values[2]}%'\n entry = dict(zip(table, values))\n einfo = {t: i for t, i in zip(table, info)}\n if table_name == 'MsiFileHash':\n entry['Hash'] = struct.pack('<IIII', row[2] ^ \n 2147483648, row[3] ^ 2147483648, row[4] ^ \n 2147483648, row[5] ^ 2147483648).hex()\n if table_name == 'CustomAction':\n code = row[1] & 63\n try:\n entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]\n except LookupError:\n pass\n t = einfo.get('Target')\n c = {(37): 'js', (38): 'vbs', (51): None}\n if code in c and t and not t.is_integer:\n postprocessing.append(ScriptItem(r, c[code]))\n processed.append(entry)\n if processed:\n processed_table_data[table_name] = processed\n ca = processed_table_data.get('CustomAction', None)\n for item in postprocessing:\n entry = ca[item.row_index]\n try:\n path: str = entry['Action']\n data: str = entry['Target']\n except KeyError:\n continue\n root = f'Action/{path}'\n if item.extension:\n path = f'{root}.{item.extension}'\n streams[path] = UnpackResult(path, data.encode(self.codec))\n continue\n data = format_string(data)\n parts = [part.partition('\\x02') for part in data.split('\\x01')]\n if not all(part[1] == '\\x02' for part in parts):\n continue\n for name, _, script in parts:\n if not name.lower().startswith('script'):\n continue\n if not script:\n continue\n path = f'{root}.{name}'\n streams[path] = UnpackResult(path, script.encode(self.codec))\n for ignored_stream in ['[5]SummaryInformation',\n '[5]DocumentSummaryInformation', '[5]DigitalSignature',\n '[5]MsiDigitalSignatureEx']:\n streams.pop(ignored_stream, None)\n inconsistencies = 0\n for k in range(len(strings)):\n c = strings.computed_ref_count[k]\n p = strings.provided_ref_count[k]\n if c != p and not self.log_debug(\n f'string reference count computed={c} provided={p}:',\n strings.ref(k + 1, False)):\n inconsistencies += 1\n if inconsistencies:\n self.log_info(\n f'found {inconsistencies} incorrect string reference counts')\n\n def fix_msi_path(path: str):\n prefix, dot, name = path.partition('.')\n if dot == '.' 
and prefix.lower() == 'binary':\n path = f'{prefix}/{name}'\n return path\n streams = {fix_msi_path(path): item for path, item in streams.items()}\n ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME, json.dumps(\n processed_table_data, indent=4).encode(self.codec))\n streams[ds.path] = ds\n for path in sorted(streams):\n streams[path].path = path\n yield streams[path]\n\n @classmethod\n def handles(self, data: bytearray):\n if not data.startswith(b'\\xd0\\xcf\\x11\\xe0'):\n return False\n return FileMagicInfo(data).extension == 'msi'\n\n\nxtmsi.__doc__ = xtmsi.__doc__.format(FN=xtmsi._SYNTHETIC_STREAMS_FILENAME)\n",
"step-5": "#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\nfrom __future__ import annotations\r\nfrom typing import List, Dict, NamedTuple, Union, Optional\r\n\r\nimport codecs\r\nimport collections\r\nimport enum\r\nimport json\r\nimport re\r\nimport struct\r\n\r\nfrom refinery.lib.structures import StructReader\r\nfrom refinery.units.formats.office.xtdoc import xtdoc, UnpackResult\r\nfrom refinery.lib import chunks\r\nfrom refinery.lib.types import ByteStr\r\nfrom refinery.lib.mime import FileMagicInfo\r\nfrom refinery.lib.tools import cached_property\r\n\r\n\r\nclass MsiType(enum.IntEnum):\r\n \"\"\"\r\n Known data types for MSI table cell entries.\r\n \"\"\"\r\n\r\n Long = 0x104\r\n Short = 0x502\r\n Binary = 0x900\r\n String = 0xD00\r\n StringLocalized = 0xF00\r\n Unknown = 0\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n\r\nclass MSITableColumnInfo(NamedTuple):\r\n \"\"\"\r\n Represents information about an MSI table column. See also:\r\n https://doxygen.reactos.org/db/de4/msipriv_8h.html\r\n \"\"\"\r\n number: int\r\n attributes: int\r\n\r\n @property\r\n def type(self) -> MsiType:\r\n try:\r\n if self.is_integer:\r\n return MsiType(self.attributes & 0xFFF)\r\n else:\r\n return MsiType(self.attributes & 0xF00)\r\n except Exception:\r\n return MsiType.Unknown\r\n\r\n @property\r\n def is_integer(self) -> bool:\r\n return self.attributes & 0x0F00 < 0x800\r\n\r\n @property\r\n def is_key(self) -> bool:\r\n return self.attributes & 0x2000 == 0x2000\r\n\r\n @property\r\n def is_nullable(self) -> bool:\r\n return self.attributes & 0x1000 == 0x1000\r\n\r\n @property\r\n def length(self) -> int:\r\n vt = self.type\r\n if vt is MsiType.Long:\r\n return 4\r\n if vt is MsiType.Short:\r\n return 2\r\n return self.attributes & 0xFF\r\n\r\n @property\r\n def struct_format(self) -> str:\r\n vt = self.type\r\n if vt is MsiType.Long:\r\n return 'I'\r\n elif vt is MsiType.Short:\r\n return 'H'\r\n else:\r\n return 'H'\r\n\r\n\r\nclass MSIStringData:\r\n def __init__(self, string_data: ByteStr, string_pool: ByteStr):\r\n data = StructReader(string_data)\r\n pool = StructReader(string_pool)\r\n self.strings: List[bytes] = []\r\n self.provided_ref_count: List[int] = []\r\n self.computed_ref_count: List[int] = []\r\n self.codepage = pool.u16()\r\n self._unknown = pool.u16()\r\n while not pool.eof:\r\n size, rc = pool.read_struct('<HH')\r\n string = data.read_bytes(size)\r\n self.strings.append(string)\r\n self.provided_ref_count.append(rc)\r\n self.computed_ref_count.append(0)\r\n\r\n @cached_property\r\n def codec(self):\r\n try:\r\n return codecs.lookup(F'cp{self.codepage}').name\r\n except Exception:\r\n xtmsi.log_info('failed looking up codec', self.codepage)\r\n return 'latin1'\r\n\r\n def __len__(self):\r\n return len(self.strings)\r\n\r\n def __iter__(self):\r\n yield from range(1, len(self) + 1)\r\n\r\n def __contains__(self, index):\r\n return 0 < index <= len(self)\r\n\r\n def ref(self, index: int, increment=True) -> Union[str, bytes]:\r\n assert index > 0\r\n index -= 1\r\n if increment:\r\n self.computed_ref_count[index] += 1\r\n data = self.strings[index]\r\n data = data.decode(self.codec)\r\n return data\r\n\r\n\r\nclass xtmsi(xtdoc):\r\n \"\"\"\r\n Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains\r\n parsed MSI table information, similar to the output of the Orca tool. 
Binary streams are placed in a\r\n virtual folder called \"Binary\", and extracted scripts from custom actions are separately extracted in\r\n a virtual folder named \"Action\".\r\n \"\"\"\r\n\r\n _SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'\r\n\r\n # https://learn.microsoft.com/en-us/windows/win32/msi/summary-list-of-all-custom-action-types\r\n _CUSTOM_ACTION_TYPES = {\r\n 0x01: 'DLL file stored in a Binary table stream.',\r\n 0x02: 'EXE file stored in a Binary table stream.',\r\n 0x05: 'JScript file stored in a Binary table stream.',\r\n 0x06: 'VBScript file stored in a Binary table stream.',\r\n 0x11: 'DLL file that is installed with a product.',\r\n 0x12: 'EXE file that is installed with a product.',\r\n 0x13: 'Displays a specified error message and returns failure, terminating the installation.',\r\n 0x15: 'JScript file that is installed with a product.',\r\n 0x16: 'VBScript file that is installed with a product.',\r\n 0x22: 'EXE file having a path referencing a directory.',\r\n 0x23: 'Directory set with formatted text.',\r\n 0x25: 'JScript text stored in this sequence table.',\r\n 0x26: 'VBScript text stored in this sequence table.',\r\n 0x32: 'EXE file having a path specified by a property value.',\r\n 0x33: 'Property set with formatted text.',\r\n 0x35: 'JScript text specified by a property value.',\r\n 0x36: 'VBScript text specified by a property value.',\r\n }\r\n\r\n def unpack(self, data):\r\n streams = {result.path: result for result in super().unpack(data)}\r\n\r\n def stream(name: str):\r\n return streams.pop(name).get_data()\r\n\r\n def column_formats(table: Dict[str, MSITableColumnInfo]) -> str:\r\n return ''.join(v.struct_format for v in table.values())\r\n\r\n def stream_to_rows(data: ByteStr, row_format: str):\r\n row_size = struct.calcsize(F'<{row_format}')\r\n row_count = int(len(data) / row_size)\r\n reader = StructReader(data)\r\n columns = [reader.read_struct(F'<{sc*row_count}') for sc in row_format]\r\n for i in range(row_count):\r\n yield [c[i] for c in columns]\r\n\r\n tables: Dict[str, Dict[str, MSITableColumnInfo]] = collections.defaultdict(collections.OrderedDict)\r\n strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))\r\n\r\n for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(stream('!_Columns'), 'HHHH'):\r\n tbl_name = strings.ref(tbl_name_id)\r\n col_name = strings.ref(col_name_id)\r\n tables[tbl_name][col_name] = MSITableColumnInfo(col_number, col_attributes)\r\n\r\n table_names_given = {strings.ref(k) for k in chunks.unpack(stream('!_Tables'), 2, False)}\r\n table_names_known = set(tables)\r\n\r\n for name in table_names_known - table_names_given:\r\n self.log_warn(F'table name known but not given: {name}')\r\n for name in table_names_given - table_names_known:\r\n self.log_warn(F'table name given but not known: {name}')\r\n\r\n class ScriptItem(NamedTuple):\r\n row_index: int\r\n extension: Optional[str]\r\n\r\n processed_table_data: Dict[str, List[Dict[str, str]]] = {}\r\n tbl_properties: Dict[str, str] = {}\r\n tbl_files: Dict[str, str] = {}\r\n tbl_components: Dict[str, str] = {}\r\n postprocessing: List[ScriptItem] = []\r\n\r\n def format_string(string: str):\r\n # https://learn.microsoft.com/en-us/windows/win32/msi/formatted\r\n def _replace(match: re.Match[str]):\r\n _replace.done = False\r\n prefix, name = match.groups()\r\n if not prefix:\r\n tbl = tbl_properties\r\n elif prefix in '%':\r\n name = name.rstrip('%').upper()\r\n return F'%{name}%'\r\n elif prefix in '!#':\r\n tbl = tbl_files\r\n elif 
prefix in '$':\r\n tbl = tbl_components\r\n else:\r\n raise ValueError\r\n return tbl.get(name, '')\r\n while True:\r\n _replace.done = True\r\n string = re.sub(R'''(?x)\r\n \\[ # open square brackent\r\n (?![~\\\\]) # not followed by escapes\r\n ([%$!#]?) # any of the valid prefix characters\r\n ([^[\\]{}]+) # no brackets or braces\r\n \\]''', _replace, string)\r\n if _replace.done:\r\n break\r\n string = re.sub(r'\\[\\\\(.)\\]', r'\\1', string)\r\n string = string.replace('[~]', '\\0')\r\n return string\r\n\r\n for table_name, table in tables.items():\r\n stream_name = F'!{table_name}'\r\n if stream_name not in streams:\r\n continue\r\n processed = []\r\n info = list(table.values())\r\n for r, row in enumerate(stream_to_rows(stream(stream_name), column_formats(table))):\r\n values = []\r\n for index, value in enumerate(row):\r\n vt = info[index].type\r\n if vt is MsiType.Long:\r\n if value != 0:\r\n value -= 0x80000000\r\n elif vt is MsiType.Short:\r\n if value != 0:\r\n value -= 0x8000\r\n elif value in strings:\r\n value = strings.ref(value)\r\n elif not info[index].is_integer:\r\n value = ''\r\n values.append(value)\r\n if table_name == 'Property':\r\n tbl_properties[values[0]] = values[1]\r\n if table_name == 'File':\r\n tbl_properties[values[0]] = values[2]\r\n if table_name == 'Component':\r\n tbl_properties[values[0]] = F'%{values[2]}%'\r\n entry = dict(zip(table, values))\r\n einfo = {t: i for t, i in zip(table, info)}\r\n if table_name == 'MsiFileHash':\r\n entry['Hash'] = struct.pack(\r\n '<IIII',\r\n row[2] ^ 0x80000000,\r\n row[3] ^ 0x80000000,\r\n row[4] ^ 0x80000000,\r\n row[5] ^ 0x80000000,\r\n ).hex()\r\n if table_name == 'CustomAction':\r\n code = row[1] & 0x3F\r\n try:\r\n entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]\r\n except LookupError:\r\n pass\r\n t = einfo.get('Target')\r\n c = {0x25: 'js', 0x26: 'vbs', 0x33: None}\r\n if code in c and t and not t.is_integer:\r\n postprocessing.append(ScriptItem(r, c[code]))\r\n processed.append(entry)\r\n if processed:\r\n processed_table_data[table_name] = processed\r\n\r\n ca = processed_table_data.get('CustomAction', None)\r\n for item in postprocessing:\r\n entry = ca[item.row_index]\r\n try:\r\n path: str = entry['Action']\r\n data: str = entry['Target']\r\n except KeyError:\r\n continue\r\n root = F'Action/{path}'\r\n if item.extension:\r\n path = F'{root}.{item.extension}'\r\n streams[path] = UnpackResult(path, data.encode(self.codec))\r\n continue\r\n data = format_string(data)\r\n parts = [part.partition('\\x02') for part in data.split('\\x01')]\r\n if not all(part[1] == '\\x02' for part in parts):\r\n continue\r\n for name, _, script in parts:\r\n if not name.lower().startswith('script'):\r\n continue\r\n if not script:\r\n continue\r\n path = F'{root}.{name}'\r\n streams[path] = UnpackResult(path, script.encode(self.codec))\r\n\r\n for ignored_stream in [\r\n '[5]SummaryInformation',\r\n '[5]DocumentSummaryInformation',\r\n '[5]DigitalSignature',\r\n '[5]MsiDigitalSignatureEx'\r\n ]:\r\n streams.pop(ignored_stream, None)\r\n\r\n inconsistencies = 0\r\n for k in range(len(strings)):\r\n c = strings.computed_ref_count[k]\r\n p = strings.provided_ref_count[k]\r\n if c != p and not self.log_debug(F'string reference count computed={c} provided={p}:', strings.ref(k + 1, False)):\r\n inconsistencies += 1\r\n if inconsistencies:\r\n self.log_info(F'found {inconsistencies} incorrect string reference counts')\r\n\r\n def fix_msi_path(path: str):\r\n prefix, dot, name = path.partition('.')\r\n if dot == '.' 
and prefix.lower() == 'binary':\r\n path = F'{prefix}/{name}'\r\n return path\r\n\r\n streams = {fix_msi_path(path): item for path, item in streams.items()}\r\n ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME,\r\n json.dumps(processed_table_data, indent=4).encode(self.codec))\r\n streams[ds.path] = ds\r\n\r\n for path in sorted(streams):\r\n streams[path].path = path\r\n yield streams[path]\r\n\r\n @classmethod\r\n def handles(self, data: bytearray):\r\n if not data.startswith(B'\\xD0\\xCF\\x11\\xE0'):\r\n return False\r\n return FileMagicInfo(data).extension == 'msi'\r\n\r\n\r\nxtmsi.__doc__ = xtmsi.__doc__.format(FN=xtmsi._SYNTHETIC_STREAMS_FILENAME)\r\n",
"step-ids": [
14,
21,
22,
26,
27
]
}
|
[
14,
21,
22,
26,
27
] |
#!/usr/bin/python
# coding=utf-8
import time
import atexit
# for signal handling
import signal
import sys
# ----------------------
# Encoder stuff
# ----------------------
import RPi.GPIO as GPIO
# init
GPIO.setmode(GPIO.BCM) # use the GPIO names, _not_ the pin numbers on the board
# Raspberry Pi pin configuration:
# pins BCM BOARD
leftEncoderGPIO = 27 # pin
rightEncoderGPIO = 22 # pin
# setup
print("setup...")
GPIO.setup(leftEncoderGPIO, GPIO.IN)
GPIO.setup(rightEncoderGPIO, GPIO.IN)
# for counting encoder steps
leftSteps = 0
rightSteps = 0
# driven distance in cm
leftDistance = 0
rightDistance = 0
# encoder pulse detection by interrupt
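# NOTE: each encoder pulse is counted as 0.24 cm of travel below; the exact
# value depends on the wheel diameter and encoder resolution, so adjust it
# for your own hardware.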
def leftEncoderCallback(answer):
global leftSteps
    leftSteps = leftSteps + 1
# measure distance
global leftDistance
leftDistance = leftDistance + 0.24
print("Left Encoder.")
def rightEncoderCallback(answer):
global rightSteps
    rightSteps = rightSteps + 1
global rightDistance
rightDistance = rightDistance + 0.24
print("Right Encoder.")
# add GPIO event detectors
print("registering event handlers...")
# enabling event handlers (if needed only)
def enableEncoderTracking():
GPIO.add_event_detect(leftEncoderGPIO, GPIO.FALLING, callback=leftEncoderCallback)
GPIO.add_event_detect(rightEncoderGPIO, GPIO.FALLING, callback=rightEncoderCallback)
# disabling event handlers
def disableEncoderTracking():
GPIO.remove_event_detect(leftEncoderGPIO)
GPIO.remove_event_detect(rightEncoderGPIO)
# ----------------------
# Motor stuff
# ----------------------
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor
# create a default motor object, no changes to I2C address or frequency
mh = Adafruit_MotorHAT(addr=0x60)
# recommended for auto-disabling motors on shutdown!
def turnOffMotors():
mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
# use motors 1 and 2 on the RasPi hat
myMotor1 = mh.getMotor(1)
myMotor2 = mh.getMotor(2)
# turn off motors
myMotor1.run(Adafruit_MotorHAT.RELEASE)
myMotor2.run(Adafruit_MotorHAT.RELEASE)
# set the speed (from 0 (off) to 255 (max speed))
startSpeed = 100
maxSpeed = 255 # max is 255!
# test switch: how long to hold full speed, in seconds
fullSpeedDuration = 0 # default 0
myMotor1.setSpeed(startSpeed)
myMotor2.setSpeed(startSpeed)
# ------------------
# my signal handler
# ------------------
def sig_handler(_signo, _stack_frame):
    turnOffMotors()
## GPIO cleanup
GPIO.remove_event_detect(leftEncoderGPIO)
GPIO.remove_event_detect(rightEncoderGPIO)
GPIO.cleanup()
print("\n")
print(str(leftSteps) + " left steps are " + str(leftDistance) + " cm driven.")
print(str(rightSteps) + " right steps are " + str(rightDistance) + " cm driven.\n")
sys.exit(0)
# signals to be handled
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGHUP, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)
#
# Countdown
#
print('Starting in 3...')
time.sleep(1)
print('Starting in 2...')
time.sleep(1)
print('Starting in 1...')
time.sleep(1)
print('GO!\n')
######
###### forever - or until ctrl+c :)
######
while True:
### drive
# drive
print("Forward! ")
    # enable odometry tracking
enableEncoderTracking()
myMotor1.run(Adafruit_MotorHAT.FORWARD)
myMotor2.run(Adafruit_MotorHAT.FORWARD)
print("\tSpeed up...")
for i in range(startSpeed, maxSpeed):
myMotor1.setSpeed(i)
myMotor2.setSpeed(i)
time.sleep(0.01)
# full speed for n seconds
print("+++ full speed for " + str(fullSpeedDuration) + " seconds +++")
time.sleep(fullSpeedDuration)
print("\tSlow down...")
for i in range(maxSpeed, startSpeed, -1):
myMotor1.setSpeed(i)
myMotor2.setSpeed(i)
time.sleep(0.01)
    # disable odometry tracking
disableEncoderTracking()
# wait one second
time.sleep(1)
""" print("Backward! ")
myMotor1.run(Adafruit_MotorHAT.BACKWARD)
myMotor2.run(Adafruit_MotorHAT.BACKWARD)
print("\tSpeed up...")
for i in range(startSpeed, maxSpeed):
myMotor1.setSpeed(i)
myMotor2.setSpeed(i)
time.sleep(0.01)
print("\tSlow down...")
for i in range(maxSpeed, startSpeed, -1):
myMotor1.setSpeed(i)
myMotor2.setSpeed(i)
time.sleep(0.01)
print("Release")
myMotor1.run(Adafruit_MotorHAT.RELEASE)
myMotor2.run(Adafruit_MotorHAT.RELEASE)
"""
# wait some time
time.sleep(0.25)
|
normal
|
{
"blob_id": "53841ba56589955e09b03018af1d0ae79b3756c4",
"index": 5595,
"step-1": "<mask token>\n\n\ndef leftEncoderCallback(answer):\n global leftSteps\n leftSteps = leftSteps + 1\n global leftDistance\n leftDistance = leftDistance + 0.24\n print('Left Encoder.')\n\n\ndef rightEncoderCallback(answer):\n global rightSteps\n rightSteps = rightSteps + 1\n global rightDistance\n rightDistance = rightDistance + 0.24\n print('Right Encoder.')\n\n\n<mask token>\n\n\ndef disableEncoderTracking():\n GPIO.remove_event_detect(leftEncoderGPIO)\n GPIO.remove_event_detect(rightEncoderGPIO)\n\n\n<mask token>\n\n\ndef turnOffMotors():\n mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n\n\n<mask token>\n\n\ndef sig_handler(_signo, _stack_frame):\n turnOffMotors()\n GPIO.remove_event_detect(leftEncoderGPIO)\n GPIO.remove_event_detect(rightEncoderGPIO)\n GPIO.cleanup()\n print('\\n')\n print(str(leftSteps) + ' left steps are ' + str(leftDistance) +\n ' cm driven.')\n print(str(rightSteps) + ' right steps are ' + str(rightDistance) +\n ' cm driven.\\n')\n sys.exit(0)\n\n\n<mask token>\n",
"step-2": "<mask token>\nGPIO.setmode(GPIO.BCM)\n<mask token>\nprint('setup...')\nGPIO.setup(leftEncoderGPIO, GPIO.IN)\nGPIO.setup(rightEncoderGPIO, GPIO.IN)\n<mask token>\n\n\ndef leftEncoderCallback(answer):\n global leftSteps\n leftSteps = leftSteps + 1\n global leftDistance\n leftDistance = leftDistance + 0.24\n print('Left Encoder.')\n\n\ndef rightEncoderCallback(answer):\n global rightSteps\n rightSteps = rightSteps + 1\n global rightDistance\n rightDistance = rightDistance + 0.24\n print('Right Encoder.')\n\n\nprint('registering event handlers...')\n\n\ndef enableEncoderTracking():\n GPIO.add_event_detect(leftEncoderGPIO, GPIO.FALLING, callback=\n leftEncoderCallback)\n GPIO.add_event_detect(rightEncoderGPIO, GPIO.FALLING, callback=\n rightEncoderCallback)\n\n\ndef disableEncoderTracking():\n GPIO.remove_event_detect(leftEncoderGPIO)\n GPIO.remove_event_detect(rightEncoderGPIO)\n\n\n<mask token>\n\n\ndef turnOffMotors():\n mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n\n\n<mask token>\nmyMotor1.run(Adafruit_MotorHAT.RELEASE)\nmyMotor2.run(Adafruit_MotorHAT.RELEASE)\n<mask token>\nmyMotor1.setSpeed(startSpeed)\nmyMotor2.setSpeed(startSpeed)\n\n\ndef sig_handler(_signo, _stack_frame):\n turnOffMotors()\n GPIO.remove_event_detect(leftEncoderGPIO)\n GPIO.remove_event_detect(rightEncoderGPIO)\n GPIO.cleanup()\n print('\\n')\n print(str(leftSteps) + ' left steps are ' + str(leftDistance) +\n ' cm driven.')\n print(str(rightSteps) + ' right steps are ' + str(rightDistance) +\n ' cm driven.\\n')\n sys.exit(0)\n\n\nsignal.signal(signal.SIGINT, sig_handler)\nsignal.signal(signal.SIGHUP, sig_handler)\nsignal.signal(signal.SIGTERM, sig_handler)\nprint('Starting in 3...')\ntime.sleep(1)\nprint('Starting in 2...')\ntime.sleep(1)\nprint('Starting in 1...')\ntime.sleep(1)\nprint('GO!\\n')\nwhile True:\n print('Forward! ')\n enableEncoderTracking()\n myMotor1.run(Adafruit_MotorHAT.FORWARD)\n myMotor2.run(Adafruit_MotorHAT.FORWARD)\n print('\\tSpeed up...')\n for i in range(startSpeed, maxSpeed):\n myMotor1.setSpeed(i)\n myMotor2.setSpeed(i)\n time.sleep(0.01)\n print('+++ full speed for ' + str(fullSpeedDuration) + ' seconds +++')\n time.sleep(fullSpeedDuration)\n print('\\tSlow down...')\n for i in range(maxSpeed, startSpeed, -1):\n myMotor1.setSpeed(i)\n myMotor2.setSpeed(i)\n time.sleep(0.01)\n disableEncoderTracking()\n time.sleep(1)\n \"\"\" print(\"Backward! \")\n myMotor1.run(Adafruit_MotorHAT.BACKWARD)\n myMotor2.run(Adafruit_MotorHAT.BACKWARD)\n\n print(\"\tSpeed up...\")\n for i in range(startSpeed, maxSpeed):\n myMotor1.setSpeed(i)\n myMotor2.setSpeed(i)\n time.sleep(0.01)\n\n print(\"\tSlow down...\")\n for i in range(maxSpeed, startSpeed, -1):\n myMotor1.setSpeed(i)\n myMotor2.setSpeed(i)\n time.sleep(0.01)\n\n print(\"Release\")\n myMotor1.run(Adafruit_MotorHAT.RELEASE)\n myMotor2.run(Adafruit_MotorHAT.RELEASE)\n \"\"\"\n time.sleep(0.25)\n",
"step-3": "<mask token>\nGPIO.setmode(GPIO.BCM)\nleftEncoderGPIO = 27\nrightEncoderGPIO = 22\nprint('setup...')\nGPIO.setup(leftEncoderGPIO, GPIO.IN)\nGPIO.setup(rightEncoderGPIO, GPIO.IN)\nleftSteps = 0\nrightSteps = 0\nleftDistance = 0\nrightDistance = 0\n\n\ndef leftEncoderCallback(answer):\n global leftSteps\n leftSteps = leftSteps + 1\n global leftDistance\n leftDistance = leftDistance + 0.24\n print('Left Encoder.')\n\n\ndef rightEncoderCallback(answer):\n global rightSteps\n rightSteps = rightSteps + 1\n global rightDistance\n rightDistance = rightDistance + 0.24\n print('Right Encoder.')\n\n\nprint('registering event handlers...')\n\n\ndef enableEncoderTracking():\n GPIO.add_event_detect(leftEncoderGPIO, GPIO.FALLING, callback=\n leftEncoderCallback)\n GPIO.add_event_detect(rightEncoderGPIO, GPIO.FALLING, callback=\n rightEncoderCallback)\n\n\ndef disableEncoderTracking():\n GPIO.remove_event_detect(leftEncoderGPIO)\n GPIO.remove_event_detect(rightEncoderGPIO)\n\n\n<mask token>\nmh = Adafruit_MotorHAT(addr=96)\n\n\ndef turnOffMotors():\n mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n\n\nmyMotor1 = mh.getMotor(1)\nmyMotor2 = mh.getMotor(2)\nmyMotor1.run(Adafruit_MotorHAT.RELEASE)\nmyMotor2.run(Adafruit_MotorHAT.RELEASE)\nstartSpeed = 100\nmaxSpeed = 255\nfullSpeedDuration = 0\nmyMotor1.setSpeed(startSpeed)\nmyMotor2.setSpeed(startSpeed)\n\n\ndef sig_handler(_signo, _stack_frame):\n turnOffMotors()\n GPIO.remove_event_detect(leftEncoderGPIO)\n GPIO.remove_event_detect(rightEncoderGPIO)\n GPIO.cleanup()\n print('\\n')\n print(str(leftSteps) + ' left steps are ' + str(leftDistance) +\n ' cm driven.')\n print(str(rightSteps) + ' right steps are ' + str(rightDistance) +\n ' cm driven.\\n')\n sys.exit(0)\n\n\nsignal.signal(signal.SIGINT, sig_handler)\nsignal.signal(signal.SIGHUP, sig_handler)\nsignal.signal(signal.SIGTERM, sig_handler)\nprint('Starting in 3...')\ntime.sleep(1)\nprint('Starting in 2...')\ntime.sleep(1)\nprint('Starting in 1...')\ntime.sleep(1)\nprint('GO!\\n')\nwhile True:\n print('Forward! ')\n enableEncoderTracking()\n myMotor1.run(Adafruit_MotorHAT.FORWARD)\n myMotor2.run(Adafruit_MotorHAT.FORWARD)\n print('\\tSpeed up...')\n for i in range(startSpeed, maxSpeed):\n myMotor1.setSpeed(i)\n myMotor2.setSpeed(i)\n time.sleep(0.01)\n print('+++ full speed for ' + str(fullSpeedDuration) + ' seconds +++')\n time.sleep(fullSpeedDuration)\n print('\\tSlow down...')\n for i in range(maxSpeed, startSpeed, -1):\n myMotor1.setSpeed(i)\n myMotor2.setSpeed(i)\n time.sleep(0.01)\n disableEncoderTracking()\n time.sleep(1)\n \"\"\" print(\"Backward! \")\n myMotor1.run(Adafruit_MotorHAT.BACKWARD)\n myMotor2.run(Adafruit_MotorHAT.BACKWARD)\n\n print(\"\tSpeed up...\")\n for i in range(startSpeed, maxSpeed):\n myMotor1.setSpeed(i)\n myMotor2.setSpeed(i)\n time.sleep(0.01)\n\n print(\"\tSlow down...\")\n for i in range(maxSpeed, startSpeed, -1):\n myMotor1.setSpeed(i)\n myMotor2.setSpeed(i)\n time.sleep(0.01)\n\n print(\"Release\")\n myMotor1.run(Adafruit_MotorHAT.RELEASE)\n myMotor2.run(Adafruit_MotorHAT.RELEASE)\n \"\"\"\n time.sleep(0.25)\n",
"step-4": "import time\nimport atexit\nimport signal\nimport sys\nimport RPi.GPIO as GPIO\nGPIO.setmode(GPIO.BCM)\nleftEncoderGPIO = 27\nrightEncoderGPIO = 22\nprint('setup...')\nGPIO.setup(leftEncoderGPIO, GPIO.IN)\nGPIO.setup(rightEncoderGPIO, GPIO.IN)\nleftSteps = 0\nrightSteps = 0\nleftDistance = 0\nrightDistance = 0\n\n\ndef leftEncoderCallback(answer):\n global leftSteps\n leftSteps = leftSteps + 1\n global leftDistance\n leftDistance = leftDistance + 0.24\n print('Left Encoder.')\n\n\ndef rightEncoderCallback(answer):\n global rightSteps\n rightSteps = rightSteps + 1\n global rightDistance\n rightDistance = rightDistance + 0.24\n print('Right Encoder.')\n\n\nprint('registering event handlers...')\n\n\ndef enableEncoderTracking():\n GPIO.add_event_detect(leftEncoderGPIO, GPIO.FALLING, callback=\n leftEncoderCallback)\n GPIO.add_event_detect(rightEncoderGPIO, GPIO.FALLING, callback=\n rightEncoderCallback)\n\n\ndef disableEncoderTracking():\n GPIO.remove_event_detect(leftEncoderGPIO)\n GPIO.remove_event_detect(rightEncoderGPIO)\n\n\nfrom Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor\nmh = Adafruit_MotorHAT(addr=96)\n\n\ndef turnOffMotors():\n mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n\n\nmyMotor1 = mh.getMotor(1)\nmyMotor2 = mh.getMotor(2)\nmyMotor1.run(Adafruit_MotorHAT.RELEASE)\nmyMotor2.run(Adafruit_MotorHAT.RELEASE)\nstartSpeed = 100\nmaxSpeed = 255\nfullSpeedDuration = 0\nmyMotor1.setSpeed(startSpeed)\nmyMotor2.setSpeed(startSpeed)\n\n\ndef sig_handler(_signo, _stack_frame):\n turnOffMotors()\n GPIO.remove_event_detect(leftEncoderGPIO)\n GPIO.remove_event_detect(rightEncoderGPIO)\n GPIO.cleanup()\n print('\\n')\n print(str(leftSteps) + ' left steps are ' + str(leftDistance) +\n ' cm driven.')\n print(str(rightSteps) + ' right steps are ' + str(rightDistance) +\n ' cm driven.\\n')\n sys.exit(0)\n\n\nsignal.signal(signal.SIGINT, sig_handler)\nsignal.signal(signal.SIGHUP, sig_handler)\nsignal.signal(signal.SIGTERM, sig_handler)\nprint('Starting in 3...')\ntime.sleep(1)\nprint('Starting in 2...')\ntime.sleep(1)\nprint('Starting in 1...')\ntime.sleep(1)\nprint('GO!\\n')\nwhile True:\n print('Forward! ')\n enableEncoderTracking()\n myMotor1.run(Adafruit_MotorHAT.FORWARD)\n myMotor2.run(Adafruit_MotorHAT.FORWARD)\n print('\\tSpeed up...')\n for i in range(startSpeed, maxSpeed):\n myMotor1.setSpeed(i)\n myMotor2.setSpeed(i)\n time.sleep(0.01)\n print('+++ full speed for ' + str(fullSpeedDuration) + ' seconds +++')\n time.sleep(fullSpeedDuration)\n print('\\tSlow down...')\n for i in range(maxSpeed, startSpeed, -1):\n myMotor1.setSpeed(i)\n myMotor2.setSpeed(i)\n time.sleep(0.01)\n disableEncoderTracking()\n time.sleep(1)\n \"\"\" print(\"Backward! \")\n myMotor1.run(Adafruit_MotorHAT.BACKWARD)\n myMotor2.run(Adafruit_MotorHAT.BACKWARD)\n\n print(\"\tSpeed up...\")\n for i in range(startSpeed, maxSpeed):\n myMotor1.setSpeed(i)\n myMotor2.setSpeed(i)\n time.sleep(0.01)\n\n print(\"\tSlow down...\")\n for i in range(maxSpeed, startSpeed, -1):\n myMotor1.setSpeed(i)\n myMotor2.setSpeed(i)\n time.sleep(0.01)\n\n print(\"Release\")\n myMotor1.run(Adafruit_MotorHAT.RELEASE)\n myMotor2.run(Adafruit_MotorHAT.RELEASE)\n \"\"\"\n time.sleep(0.25)\n",
"step-5": "#!/usr/bin/python\n# coding=utf-8\n\nimport time\nimport atexit\n\n# for signal handling\nimport signal\nimport sys\n\n\n# ----------------------\n# Encoder stuff\n# ----------------------\nimport RPi.GPIO as GPIO\n\n# init\nGPIO.setmode(GPIO.BCM) # use the GPIO names, _not_ the pin numbers on the board\n\n# Raspberry Pi pin configuration:\n# pins\t BCM BOARD\nleftEncoderGPIO = 27 # pin\nrightEncoderGPIO = 22 # pin\n\n# setup\nprint(\"setup...\")\nGPIO.setup(leftEncoderGPIO, GPIO.IN)\nGPIO.setup(rightEncoderGPIO, GPIO.IN)\n\n# for counting encoder steps\nleftSteps = 0\nrightSteps = 0\n# driven distance in cm\nleftDistance = 0\nrightDistance = 0\n\n# encoder pulse detection by interrupt\ndef leftEncoderCallback(answer):\n global leftSteps\n leftSteps = leftSteps +1\n # measure distance\n global leftDistance\n leftDistance = leftDistance + 0.24\n print(\"Left Encoder.\")\n\ndef rightEncoderCallback(answer):\n global rightSteps\n rightSteps = rightSteps +1\n global rightDistance\n rightDistance = rightDistance + 0.24\n print(\"Right Encoder.\")\n\n# add GPIO event detectors\nprint(\"registering event handlers...\")\n\n# enabling event handlers (if needed only)\ndef enableEncoderTracking():\n GPIO.add_event_detect(leftEncoderGPIO, GPIO.FALLING, callback=leftEncoderCallback)\n GPIO.add_event_detect(rightEncoderGPIO, GPIO.FALLING, callback=rightEncoderCallback)\n\n# disabling event handlers\ndef disableEncoderTracking():\n GPIO.remove_event_detect(leftEncoderGPIO)\n GPIO.remove_event_detect(rightEncoderGPIO)\n\n\n# ----------------------\n# Motor stuff\n# ----------------------\nfrom Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor\n\n# create a default motor object, no changes to I2C address or frequency\nmh = Adafruit_MotorHAT(addr=0x60)\n\n# recommended for auto-disabling motors on shutdown!\ndef turnOffMotors():\n mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n\n# user motor 1 and 2 on RasPi hat\nmyMotor1 = mh.getMotor(1)\nmyMotor2 = mh.getMotor(2)\n\n# turn off motors\nmyMotor1.run(Adafruit_MotorHAT.RELEASE);\nmyMotor2.run(Adafruit_MotorHAT.RELEASE);\n\n# set the speed (from 0 (off) to 255 (max speed))\nstartSpeed = 100\nmaxSpeed = 255 # max is 255!\n\n# test switch\nfullSpeedDuration = 0 # default 0\n\nmyMotor1.setSpeed(startSpeed)\nmyMotor2.setSpeed(startSpeed)\n\n\n# ------------------\n# my signal handler\n# ------------------\ndef sig_handler(_signo, _stack_frame):\n turnOffMotors();\n ## GPIO cleanup\n GPIO.remove_event_detect(leftEncoderGPIO)\n GPIO.remove_event_detect(rightEncoderGPIO)\n GPIO.cleanup()\n print(\"\\n\")\n print(str(leftSteps) + \" left steps are \" + str(leftDistance) + \" cm driven.\")\n print(str(rightSteps) + \" right steps are \" + str(rightDistance) + \" cm driven.\\n\")\n sys.exit(0)\n\n# signals to be handled\nsignal.signal(signal.SIGINT, sig_handler)\nsignal.signal(signal.SIGHUP, sig_handler)\nsignal.signal(signal.SIGTERM, sig_handler)\n\n\n#\n# Cowntdown\n#\nprint('Starting in 3...')\ntime.sleep(1)\nprint('Starting in 2...')\ntime.sleep(1)\nprint('Starting in 1...')\ntime.sleep(1)\nprint('GO!\\n')\n\n\n######\n###### forever - or until ctrl+c :)\n######\nwhile (True):\n ### drive\n # drive\n print(\"Forward! 
\")\n # enable Odometrie\n enableEncoderTracking()\n\n myMotor1.run(Adafruit_MotorHAT.FORWARD)\n myMotor2.run(Adafruit_MotorHAT.FORWARD)\n\n print(\"\\tSpeed up...\")\n for i in range(startSpeed, maxSpeed):\n myMotor1.setSpeed(i)\n myMotor2.setSpeed(i)\n time.sleep(0.01)\n\n # full speed for n seconds\n print(\"+++ full speed for \" + str(fullSpeedDuration) + \" seconds +++\")\n time.sleep(fullSpeedDuration)\n\n print(\"\\tSlow down...\")\n for i in range(maxSpeed, startSpeed, -1):\n myMotor1.setSpeed(i)\n myMotor2.setSpeed(i)\n time.sleep(0.01)\n\n # disable Odometrie\n disableEncoderTracking()\n\n # wait one second\n time.sleep(1)\n\n \"\"\" print(\"Backward! \")\n myMotor1.run(Adafruit_MotorHAT.BACKWARD)\n myMotor2.run(Adafruit_MotorHAT.BACKWARD)\n\n print(\"\\tSpeed up...\")\n for i in range(startSpeed, maxSpeed):\n myMotor1.setSpeed(i)\n myMotor2.setSpeed(i)\n time.sleep(0.01)\n\n print(\"\\tSlow down...\")\n for i in range(maxSpeed, startSpeed, -1):\n myMotor1.setSpeed(i)\n myMotor2.setSpeed(i)\n time.sleep(0.01)\n\n print(\"Release\")\n myMotor1.run(Adafruit_MotorHAT.RELEASE)\n myMotor2.run(Adafruit_MotorHAT.RELEASE)\n \"\"\"\n\n # wait some time\n time.sleep(0.25)\n",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(html.read())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
username = ''
link = 'https://www.instagram.com/' + username
html = urllib.request.urlopen(link)
print(html.read())
<|reserved_special_token_1|>
import urllib.request
username = ''
link = 'https://www.instagram.com/' + username
html = urllib.request.urlopen(link)
print(html.read())
<|reserved_special_token_1|>
import urllib.request
username = ''
link = r'https://www.instagram.com/' + username
html = urllib.request.urlopen(link)
print(html.read())
|
flexible
|
{
"blob_id": "db93de33f537eeaf64ca8e2b2b79aba1f592305b",
"index": 5434,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(html.read())\n",
"step-3": "<mask token>\nusername = ''\nlink = 'https://www.instagram.com/' + username\nhtml = urllib.request.urlopen(link)\nprint(html.read())\n",
"step-4": "import urllib.request\nusername = ''\nlink = 'https://www.instagram.com/' + username\nhtml = urllib.request.urlopen(link)\nprint(html.read())\n",
"step-5": "import urllib.request\n\nusername = ''\nlink = r'https://www.instagram.com/' + username\nhtml = urllib.request.urlopen(link)\nprint(html.read())",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
admin.site.register(Contactus, ContactusAdmin),
admin.site.register(Company, CompanyAdmin),
admin.site.register(Products, ProductsAdmin),
admin.site.register(Brands, BrandsAdmin),
<|reserved_special_token_1|>
from django.contrib import admin
from .models import Contactus, ContactusAdmin, Company, CompanyAdmin, Products, ProductsAdmin, Brands, BrandsAdmin
admin.site.register(Contactus, ContactusAdmin),
admin.site.register(Company, CompanyAdmin),
admin.site.register(Products, ProductsAdmin),
admin.site.register(Brands, BrandsAdmin),
<|reserved_special_token_1|>
from django.contrib import admin
from .models import Contactus,ContactusAdmin,Company,CompanyAdmin,Products,ProductsAdmin,Brands,BrandsAdmin
# Register your models here.
admin.site.register(Contactus,ContactusAdmin),
admin.site.register(Company,CompanyAdmin),
admin.site.register(Products,ProductsAdmin),
admin.site.register(Brands,BrandsAdmin),
|
flexible
|
{
"blob_id": "9586dc118be4388491770d823a38e8068e3b91cb",
"index": 5960,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Contactus, ContactusAdmin),\nadmin.site.register(Company, CompanyAdmin),\nadmin.site.register(Products, ProductsAdmin),\nadmin.site.register(Brands, BrandsAdmin),\n",
"step-3": "from django.contrib import admin\nfrom .models import Contactus, ContactusAdmin, Company, CompanyAdmin, Products, ProductsAdmin, Brands, BrandsAdmin\nadmin.site.register(Contactus, ContactusAdmin),\nadmin.site.register(Company, CompanyAdmin),\nadmin.site.register(Products, ProductsAdmin),\nadmin.site.register(Brands, BrandsAdmin),\n",
"step-4": "from django.contrib import admin\nfrom .models import Contactus,ContactusAdmin,Company,CompanyAdmin,Products,ProductsAdmin,Brands,BrandsAdmin\n# Register your models here.\n\nadmin.site.register(Contactus,ContactusAdmin),\nadmin.site.register(Company,CompanyAdmin),\nadmin.site.register(Products,ProductsAdmin),\nadmin.site.register(Brands,BrandsAdmin),",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
from copy import copy
from openprocurement.api.utils import (
json_view,
context_unpack,
APIResource,
get_now,
)
from openprocurement.tender.core.utils import save_tender, apply_patch
from openprocurement.tender.core.validation import (
validate_requirement_data,
validate_patch_requirement_data,
validate_operation_ecriteria_objects,
validate_patch_exclusion_ecriteria_objects,
validate_change_requirement_objects,
validate_put_requirement_objects,
)
class BaseTenderCriteriaRGRequirementResource(APIResource):
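    """
    Views for requirements inside a criterion's requirement group: POST adds a
    requirement, PATCH edits it in place, PUT publishes a new version and
    cancels the previous one.
    """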
@json_view(
content_type="application/json",
validators=(
validate_operation_ecriteria_objects,
validate_patch_exclusion_ecriteria_objects,
validate_requirement_data,
),
permission="edit_tender"
)
def collection_post(self):
requirement = self.request.validated["requirement"]
self.request.context.requirements.append(requirement)
tender = self.request.validated["tender"]
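        # a tender owner changing criteria during active.tendering invalidates bids already submitted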
if (
self.request.authenticated_role == "tender_owner"
and tender.status == "active.tendering"
and hasattr(tender, "invalidate_bids_data")
):
tender.invalidate_bids_data()
if save_tender(self.request):
self.LOGGER.info(
"Created requirement group requirement {}".format(requirement.id),
extra=context_unpack(
self.request,
{"MESSAGE_ID": "requirement_group_requirement_create"},
{"requirement_id": requirement.id},
),
)
self.request.response.status = 201
self.request.response.headers["Location"] = self.request.route_url(
"{}:Requirement Group Requirement".format(self.request.validated["tender"].procurementMethodType),
tender_id=self.request.validated["tender_id"],
criterion_id=self.request.validated["criterion"].id,
requirement_group_id=self.request.validated["requirement_group"].id,
requirement_id=requirement.id
)
return {"data": requirement.serialize("view")}
@json_view(permission="view_tender")
def collection_get(self):
return {"data": [i.serialize("view") for i in self.request.context.requirements]}
@json_view(permission="view_tender")
def get(self):
return {"data": self.request.validated["requirement"].serialize("view")}
@json_view(
content_type="application/json",
validators=(
validate_change_requirement_objects,
validate_patch_requirement_data,
),
permission="edit_tender"
)
def patch(self):
requirement = self.request.context
apply_patch(self.request, save=False, src=requirement.serialize())
tender = self.request.validated["tender"]
if self.request.authenticated_role == "tender_owner" and hasattr(tender, "invalidate_bids_data"):
tender.invalidate_bids_data()
if save_tender(self.request):
self.LOGGER.info(
"Updated {}".format(requirement.id),
extra=context_unpack(self.request, {"MESSAGE_ID": "requirement_group_requirement_patch"}),
)
return {"data": requirement.serialize("view")}
@json_view(
content_type="application/json",
validators=(
validate_put_requirement_objects,
validate_patch_requirement_data,
),
permission="edit_tender"
)
def put(self):
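        # PUT publishes a new version of the requirement and cancels the old one,
        # unless the incoming data explicitly sets status to "cancelled"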
old_requirement = self.request.context
requirement = old_requirement
if self.request.validated["data"].get("status") != "cancelled":
model = type(old_requirement)
data = copy(self.request.validated["data"])
for attr_name in type(old_requirement)._fields:
if data.get(attr_name) is None:
data[attr_name] = getattr(old_requirement, attr_name)
            # avoid creating a new version when nothing changed and only the ids were regenerated
if "eligibleEvidences" not in self.request.json.get("data", {}):
data["eligibleEvidences"] = [
evidence.to_primitive(role="create") for evidence in getattr(old_requirement, "eligibleEvidences")
]
requirement = model(data)
if old_requirement.to_primitive() == requirement.to_primitive():
return {"data": (old_requirement.serialize("view"),)}
requirement.datePublished = get_now()
requirement.dateModified = None
self.request.validated["requirement_group"].requirements.append(requirement)
if old_requirement.status == "active":
old_requirement.status = "cancelled"
old_requirement.dateModified = get_now()
tender = self.request.validated["tender"]
if (
self.request.authenticated_role == "tender_owner"
and tender.status == "active.tendering"
and hasattr(tender, "invalidate_bids_data")
):
tender.invalidate_bids_data()
if save_tender(self.request):
self.LOGGER.info(
"New version of requirement {}".format(requirement.id),
extra=context_unpack(self.request, {"MESSAGE_ID": "requirement_group_requirement_put"}),
)
return {"data": (requirement.serialize("view"), old_requirement.serialize("view_old"))}
|
normal
|
{
"blob_id": "6194079dd506553b4e5b66f1fb92bb8642704b59",
"index": 6893,
"step-1": "<mask token>\n\n\nclass BaseTenderCriteriaRGRequirementResource(APIResource):\n <mask token>\n\n @json_view(permission='view_tender')\n def collection_get(self):\n return {'data': [i.serialize('view') for i in self.request.context.\n requirements]}\n <mask token>\n\n @json_view(content_type='application/json', validators=(\n validate_change_requirement_objects,\n validate_patch_requirement_data), permission='edit_tender')\n def patch(self):\n requirement = self.request.context\n apply_patch(self.request, save=False, src=requirement.serialize())\n tender = self.request.validated['tender']\n if self.request.authenticated_role == 'tender_owner' and hasattr(tender\n , 'invalidate_bids_data'):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Updated {}'.format(requirement.id), extra=\n context_unpack(self.request, {'MESSAGE_ID':\n 'requirement_group_requirement_patch'}))\n return {'data': requirement.serialize('view')}\n\n @json_view(content_type='application/json', validators=(\n validate_put_requirement_objects, validate_patch_requirement_data),\n permission='edit_tender')\n def put(self):\n old_requirement = self.request.context\n requirement = old_requirement\n if self.request.validated['data'].get('status') != 'cancelled':\n model = type(old_requirement)\n data = copy(self.request.validated['data'])\n for attr_name in type(old_requirement)._fields:\n if data.get(attr_name) is None:\n data[attr_name] = getattr(old_requirement, attr_name)\n if 'eligibleEvidences' not in self.request.json.get('data', {}):\n data['eligibleEvidences'] = [evidence.to_primitive(role=\n 'create') for evidence in getattr(old_requirement,\n 'eligibleEvidences')]\n requirement = model(data)\n if old_requirement.to_primitive() == requirement.to_primitive():\n return {'data': (old_requirement.serialize('view'),)}\n requirement.datePublished = get_now()\n requirement.dateModified = None\n self.request.validated['requirement_group'].requirements.append(\n requirement)\n if old_requirement.status == 'active':\n old_requirement.status = 'cancelled'\n old_requirement.dateModified = get_now()\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('New version of requirement {}'.format(\n requirement.id), extra=context_unpack(self.request, {\n 'MESSAGE_ID': 'requirement_group_requirement_put'}))\n return {'data': (requirement.serialize('view'), old_requirement\n .serialize('view_old'))}\n",
"step-2": "<mask token>\n\n\nclass BaseTenderCriteriaRGRequirementResource(APIResource):\n\n @json_view(content_type='application/json', validators=(\n validate_operation_ecriteria_objects,\n validate_patch_exclusion_ecriteria_objects,\n validate_requirement_data), permission='edit_tender')\n def collection_post(self):\n requirement = self.request.validated['requirement']\n self.request.context.requirements.append(requirement)\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Created requirement group requirement {}'.\n format(requirement.id), extra=context_unpack(self.request,\n {'MESSAGE_ID': 'requirement_group_requirement_create'}, {\n 'requirement_id': requirement.id}))\n self.request.response.status = 201\n self.request.response.headers['Location'] = self.request.route_url(\n '{}:Requirement Group Requirement'.format(self.request.\n validated['tender'].procurementMethodType), tender_id=self.\n request.validated['tender_id'], criterion_id=self.request.\n validated['criterion'].id, requirement_group_id=self.\n request.validated['requirement_group'].id, requirement_id=\n requirement.id)\n return {'data': requirement.serialize('view')}\n\n @json_view(permission='view_tender')\n def collection_get(self):\n return {'data': [i.serialize('view') for i in self.request.context.\n requirements]}\n <mask token>\n\n @json_view(content_type='application/json', validators=(\n validate_change_requirement_objects,\n validate_patch_requirement_data), permission='edit_tender')\n def patch(self):\n requirement = self.request.context\n apply_patch(self.request, save=False, src=requirement.serialize())\n tender = self.request.validated['tender']\n if self.request.authenticated_role == 'tender_owner' and hasattr(tender\n , 'invalidate_bids_data'):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Updated {}'.format(requirement.id), extra=\n context_unpack(self.request, {'MESSAGE_ID':\n 'requirement_group_requirement_patch'}))\n return {'data': requirement.serialize('view')}\n\n @json_view(content_type='application/json', validators=(\n validate_put_requirement_objects, validate_patch_requirement_data),\n permission='edit_tender')\n def put(self):\n old_requirement = self.request.context\n requirement = old_requirement\n if self.request.validated['data'].get('status') != 'cancelled':\n model = type(old_requirement)\n data = copy(self.request.validated['data'])\n for attr_name in type(old_requirement)._fields:\n if data.get(attr_name) is None:\n data[attr_name] = getattr(old_requirement, attr_name)\n if 'eligibleEvidences' not in self.request.json.get('data', {}):\n data['eligibleEvidences'] = [evidence.to_primitive(role=\n 'create') for evidence in getattr(old_requirement,\n 'eligibleEvidences')]\n requirement = model(data)\n if old_requirement.to_primitive() == requirement.to_primitive():\n return {'data': (old_requirement.serialize('view'),)}\n requirement.datePublished = get_now()\n requirement.dateModified = None\n self.request.validated['requirement_group'].requirements.append(\n requirement)\n if old_requirement.status == 'active':\n old_requirement.status = 'cancelled'\n old_requirement.dateModified = get_now()\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 
'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('New version of requirement {}'.format(\n requirement.id), extra=context_unpack(self.request, {\n 'MESSAGE_ID': 'requirement_group_requirement_put'}))\n return {'data': (requirement.serialize('view'), old_requirement\n .serialize('view_old'))}\n",
"step-3": "<mask token>\n\n\nclass BaseTenderCriteriaRGRequirementResource(APIResource):\n\n @json_view(content_type='application/json', validators=(\n validate_operation_ecriteria_objects,\n validate_patch_exclusion_ecriteria_objects,\n validate_requirement_data), permission='edit_tender')\n def collection_post(self):\n requirement = self.request.validated['requirement']\n self.request.context.requirements.append(requirement)\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Created requirement group requirement {}'.\n format(requirement.id), extra=context_unpack(self.request,\n {'MESSAGE_ID': 'requirement_group_requirement_create'}, {\n 'requirement_id': requirement.id}))\n self.request.response.status = 201\n self.request.response.headers['Location'] = self.request.route_url(\n '{}:Requirement Group Requirement'.format(self.request.\n validated['tender'].procurementMethodType), tender_id=self.\n request.validated['tender_id'], criterion_id=self.request.\n validated['criterion'].id, requirement_group_id=self.\n request.validated['requirement_group'].id, requirement_id=\n requirement.id)\n return {'data': requirement.serialize('view')}\n\n @json_view(permission='view_tender')\n def collection_get(self):\n return {'data': [i.serialize('view') for i in self.request.context.\n requirements]}\n\n @json_view(permission='view_tender')\n def get(self):\n return {'data': self.request.validated['requirement'].serialize('view')\n }\n\n @json_view(content_type='application/json', validators=(\n validate_change_requirement_objects,\n validate_patch_requirement_data), permission='edit_tender')\n def patch(self):\n requirement = self.request.context\n apply_patch(self.request, save=False, src=requirement.serialize())\n tender = self.request.validated['tender']\n if self.request.authenticated_role == 'tender_owner' and hasattr(tender\n , 'invalidate_bids_data'):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Updated {}'.format(requirement.id), extra=\n context_unpack(self.request, {'MESSAGE_ID':\n 'requirement_group_requirement_patch'}))\n return {'data': requirement.serialize('view')}\n\n @json_view(content_type='application/json', validators=(\n validate_put_requirement_objects, validate_patch_requirement_data),\n permission='edit_tender')\n def put(self):\n old_requirement = self.request.context\n requirement = old_requirement\n if self.request.validated['data'].get('status') != 'cancelled':\n model = type(old_requirement)\n data = copy(self.request.validated['data'])\n for attr_name in type(old_requirement)._fields:\n if data.get(attr_name) is None:\n data[attr_name] = getattr(old_requirement, attr_name)\n if 'eligibleEvidences' not in self.request.json.get('data', {}):\n data['eligibleEvidences'] = [evidence.to_primitive(role=\n 'create') for evidence in getattr(old_requirement,\n 'eligibleEvidences')]\n requirement = model(data)\n if old_requirement.to_primitive() == requirement.to_primitive():\n return {'data': (old_requirement.serialize('view'),)}\n requirement.datePublished = get_now()\n requirement.dateModified = None\n self.request.validated['requirement_group'].requirements.append(\n requirement)\n if old_requirement.status == 'active':\n old_requirement.status = 'cancelled'\n old_requirement.dateModified = get_now()\n tender = 
self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('New version of requirement {}'.format(\n requirement.id), extra=context_unpack(self.request, {\n 'MESSAGE_ID': 'requirement_group_requirement_put'}))\n return {'data': (requirement.serialize('view'), old_requirement\n .serialize('view_old'))}\n",
"step-4": "from copy import copy\nfrom openprocurement.api.utils import json_view, context_unpack, APIResource, get_now\nfrom openprocurement.tender.core.utils import save_tender, apply_patch\nfrom openprocurement.tender.core.validation import validate_requirement_data, validate_patch_requirement_data, validate_operation_ecriteria_objects, validate_patch_exclusion_ecriteria_objects, validate_change_requirement_objects, validate_put_requirement_objects\n\n\nclass BaseTenderCriteriaRGRequirementResource(APIResource):\n\n @json_view(content_type='application/json', validators=(\n validate_operation_ecriteria_objects,\n validate_patch_exclusion_ecriteria_objects,\n validate_requirement_data), permission='edit_tender')\n def collection_post(self):\n requirement = self.request.validated['requirement']\n self.request.context.requirements.append(requirement)\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Created requirement group requirement {}'.\n format(requirement.id), extra=context_unpack(self.request,\n {'MESSAGE_ID': 'requirement_group_requirement_create'}, {\n 'requirement_id': requirement.id}))\n self.request.response.status = 201\n self.request.response.headers['Location'] = self.request.route_url(\n '{}:Requirement Group Requirement'.format(self.request.\n validated['tender'].procurementMethodType), tender_id=self.\n request.validated['tender_id'], criterion_id=self.request.\n validated['criterion'].id, requirement_group_id=self.\n request.validated['requirement_group'].id, requirement_id=\n requirement.id)\n return {'data': requirement.serialize('view')}\n\n @json_view(permission='view_tender')\n def collection_get(self):\n return {'data': [i.serialize('view') for i in self.request.context.\n requirements]}\n\n @json_view(permission='view_tender')\n def get(self):\n return {'data': self.request.validated['requirement'].serialize('view')\n }\n\n @json_view(content_type='application/json', validators=(\n validate_change_requirement_objects,\n validate_patch_requirement_data), permission='edit_tender')\n def patch(self):\n requirement = self.request.context\n apply_patch(self.request, save=False, src=requirement.serialize())\n tender = self.request.validated['tender']\n if self.request.authenticated_role == 'tender_owner' and hasattr(tender\n , 'invalidate_bids_data'):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Updated {}'.format(requirement.id), extra=\n context_unpack(self.request, {'MESSAGE_ID':\n 'requirement_group_requirement_patch'}))\n return {'data': requirement.serialize('view')}\n\n @json_view(content_type='application/json', validators=(\n validate_put_requirement_objects, validate_patch_requirement_data),\n permission='edit_tender')\n def put(self):\n old_requirement = self.request.context\n requirement = old_requirement\n if self.request.validated['data'].get('status') != 'cancelled':\n model = type(old_requirement)\n data = copy(self.request.validated['data'])\n for attr_name in type(old_requirement)._fields:\n if data.get(attr_name) is None:\n data[attr_name] = getattr(old_requirement, attr_name)\n if 'eligibleEvidences' not in self.request.json.get('data', {}):\n data['eligibleEvidences'] = [evidence.to_primitive(role=\n 'create') for evidence in getattr(old_requirement,\n 'eligibleEvidences')]\n requirement = 
model(data)\n if old_requirement.to_primitive() == requirement.to_primitive():\n return {'data': (old_requirement.serialize('view'),)}\n requirement.datePublished = get_now()\n requirement.dateModified = None\n self.request.validated['requirement_group'].requirements.append(\n requirement)\n if old_requirement.status == 'active':\n old_requirement.status = 'cancelled'\n old_requirement.dateModified = get_now()\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('New version of requirement {}'.format(\n requirement.id), extra=context_unpack(self.request, {\n 'MESSAGE_ID': 'requirement_group_requirement_put'}))\n return {'data': (requirement.serialize('view'), old_requirement\n .serialize('view_old'))}\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom copy import copy\n\nfrom openprocurement.api.utils import (\n json_view,\n context_unpack,\n APIResource,\n get_now,\n)\nfrom openprocurement.tender.core.utils import save_tender, apply_patch\nfrom openprocurement.tender.core.validation import (\n validate_requirement_data,\n validate_patch_requirement_data,\n validate_operation_ecriteria_objects,\n validate_patch_exclusion_ecriteria_objects,\n validate_change_requirement_objects,\n validate_put_requirement_objects,\n)\n\n\nclass BaseTenderCriteriaRGRequirementResource(APIResource):\n\n @json_view(\n content_type=\"application/json\",\n validators=(\n validate_operation_ecriteria_objects,\n validate_patch_exclusion_ecriteria_objects,\n validate_requirement_data,\n ),\n permission=\"edit_tender\"\n )\n def collection_post(self):\n\n requirement = self.request.validated[\"requirement\"]\n self.request.context.requirements.append(requirement)\n tender = self.request.validated[\"tender\"]\n if (\n self.request.authenticated_role == \"tender_owner\"\n and tender.status == \"active.tendering\"\n and hasattr(tender, \"invalidate_bids_data\")\n ):\n tender.invalidate_bids_data()\n\n if save_tender(self.request):\n self.LOGGER.info(\n \"Created requirement group requirement {}\".format(requirement.id),\n extra=context_unpack(\n self.request,\n {\"MESSAGE_ID\": \"requirement_group_requirement_create\"},\n {\"requirement_id\": requirement.id},\n ),\n )\n self.request.response.status = 201\n self.request.response.headers[\"Location\"] = self.request.route_url(\n \"{}:Requirement Group Requirement\".format(self.request.validated[\"tender\"].procurementMethodType),\n tender_id=self.request.validated[\"tender_id\"],\n criterion_id=self.request.validated[\"criterion\"].id,\n requirement_group_id=self.request.validated[\"requirement_group\"].id,\n requirement_id=requirement.id\n )\n return {\"data\": requirement.serialize(\"view\")}\n\n @json_view(permission=\"view_tender\")\n def collection_get(self):\n return {\"data\": [i.serialize(\"view\") for i in self.request.context.requirements]}\n\n @json_view(permission=\"view_tender\")\n def get(self):\n return {\"data\": self.request.validated[\"requirement\"].serialize(\"view\")}\n\n @json_view(\n content_type=\"application/json\",\n validators=(\n validate_change_requirement_objects,\n validate_patch_requirement_data,\n ),\n permission=\"edit_tender\"\n )\n def patch(self):\n requirement = self.request.context\n apply_patch(self.request, save=False, src=requirement.serialize())\n tender = self.request.validated[\"tender\"]\n\n if self.request.authenticated_role == \"tender_owner\" and hasattr(tender, \"invalidate_bids_data\"):\n tender.invalidate_bids_data()\n\n if save_tender(self.request):\n self.LOGGER.info(\n \"Updated {}\".format(requirement.id),\n extra=context_unpack(self.request, {\"MESSAGE_ID\": \"requirement_group_requirement_patch\"}),\n )\n return {\"data\": requirement.serialize(\"view\")}\n\n @json_view(\n content_type=\"application/json\",\n validators=(\n validate_put_requirement_objects,\n validate_patch_requirement_data,\n ),\n permission=\"edit_tender\"\n )\n def put(self):\n old_requirement = self.request.context\n requirement = old_requirement\n if self.request.validated[\"data\"].get(\"status\") != \"cancelled\":\n model = type(old_requirement)\n data = copy(self.request.validated[\"data\"])\n for attr_name in type(old_requirement)._fields:\n if data.get(attr_name) is None:\n data[attr_name] = getattr(old_requirement, attr_name)\n # To avoid new 
version creation if no changes and only id's were regenerated\n if \"eligibleEvidences\" not in self.request.json.get(\"data\", {}):\n data[\"eligibleEvidences\"] = [\n evidence.to_primitive(role=\"create\") for evidence in getattr(old_requirement, \"eligibleEvidences\")\n ]\n\n requirement = model(data)\n if old_requirement.to_primitive() == requirement.to_primitive():\n return {\"data\": (old_requirement.serialize(\"view\"),)}\n\n requirement.datePublished = get_now()\n requirement.dateModified = None\n self.request.validated[\"requirement_group\"].requirements.append(requirement)\n\n if old_requirement.status == \"active\":\n old_requirement.status = \"cancelled\"\n old_requirement.dateModified = get_now()\n\n tender = self.request.validated[\"tender\"]\n if (\n self.request.authenticated_role == \"tender_owner\"\n and tender.status == \"active.tendering\"\n and hasattr(tender, \"invalidate_bids_data\")\n ):\n tender.invalidate_bids_data()\n\n if save_tender(self.request):\n self.LOGGER.info(\n \"New version of requirement {}\".format(requirement.id),\n extra=context_unpack(self.request, {\"MESSAGE_ID\": \"requirement_group_requirement_put\"}),\n )\n return {\"data\": (requirement.serialize(\"view\"), old_requirement.serialize(\"view_old\"))}\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def test_norm_cdf_dartmouth():
"""
Examples taken from:
https://math.dartmouth.edu/archive/m20f12/public_html/matlabnormal
stored in literature directory as dartmouth_normcdf_norminv.pdf
"""
assert_almost_equal(0.0062, norm_cdf(90, 100, 4), decimal=4)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_norm_cdf_basic_sanity():
assert_almost_equal(0.5, norm_cdf(0.0, 0, 1))
def test_norm_cdf_dartmouth():
"""
Examples taken from:
https://math.dartmouth.edu/archive/m20f12/public_html/matlabnormal
stored in literature directory as dartmouth_normcdf_norminv.pdf
"""
assert_almost_equal(0.0062, norm_cdf(90, 100, 4), decimal=4)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_norm_cdf_basic_sanity():
assert_almost_equal(0.5, norm_cdf(0.0, 0, 1))
def test_norm_cdf_dartmouth():
"""
Examples taken from:
https://math.dartmouth.edu/archive/m20f12/public_html/matlabnormal
stored in literature directory as dartmouth_normcdf_norminv.pdf
"""
assert_almost_equal(0.0062, norm_cdf(90, 100, 4), decimal=4)
if __name__ == '__main__':
import pytest
pytest.main([__file__])
<|reserved_special_token_1|>
from numpy.testing import assert_almost_equal
from fastats.maths.norm_cdf import norm_cdf
def test_norm_cdf_basic_sanity():
assert_almost_equal(0.5, norm_cdf(0.0, 0, 1))
def test_norm_cdf_dartmouth():
"""
Examples taken from:
https://math.dartmouth.edu/archive/m20f12/public_html/matlabnormal
stored in literature directory as dartmouth_normcdf_norminv.pdf
"""
assert_almost_equal(0.0062, norm_cdf(90, 100, 4), decimal=4)
if __name__ == '__main__':
import pytest
pytest.main([__file__])
|
flexible
|
{
"blob_id": "0229783467b8bcd0361baf6be07e3261f34220c7",
"index": 6581,
"step-1": "<mask token>\n\n\ndef test_norm_cdf_dartmouth():\n \"\"\"\n Examples taken from:\n https://math.dartmouth.edu/archive/m20f12/public_html/matlabnormal\n stored in literature directory as dartmouth_normcdf_norminv.pdf\n \"\"\"\n assert_almost_equal(0.0062, norm_cdf(90, 100, 4), decimal=4)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_norm_cdf_basic_sanity():\n assert_almost_equal(0.5, norm_cdf(0.0, 0, 1))\n\n\ndef test_norm_cdf_dartmouth():\n \"\"\"\n Examples taken from:\n https://math.dartmouth.edu/archive/m20f12/public_html/matlabnormal\n stored in literature directory as dartmouth_normcdf_norminv.pdf\n \"\"\"\n assert_almost_equal(0.0062, norm_cdf(90, 100, 4), decimal=4)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_norm_cdf_basic_sanity():\n assert_almost_equal(0.5, norm_cdf(0.0, 0, 1))\n\n\ndef test_norm_cdf_dartmouth():\n \"\"\"\n Examples taken from:\n https://math.dartmouth.edu/archive/m20f12/public_html/matlabnormal\n stored in literature directory as dartmouth_normcdf_norminv.pdf\n \"\"\"\n assert_almost_equal(0.0062, norm_cdf(90, 100, 4), decimal=4)\n\n\nif __name__ == '__main__':\n import pytest\n pytest.main([__file__])\n",
"step-4": "from numpy.testing import assert_almost_equal\nfrom fastats.maths.norm_cdf import norm_cdf\n\n\ndef test_norm_cdf_basic_sanity():\n assert_almost_equal(0.5, norm_cdf(0.0, 0, 1))\n\n\ndef test_norm_cdf_dartmouth():\n \"\"\"\n Examples taken from:\n https://math.dartmouth.edu/archive/m20f12/public_html/matlabnormal\n stored in literature directory as dartmouth_normcdf_norminv.pdf\n \"\"\"\n assert_almost_equal(0.0062, norm_cdf(90, 100, 4), decimal=4)\n\n\nif __name__ == '__main__':\n import pytest\n pytest.main([__file__])\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class _CallableObject(object):
def __init__(self):
self._lock = threading.Lock()
self._passed_values = []
def __call__(self, value):
with self._lock:
self._passed_values.append(value)
<|reserved_special_token_0|>
class LoggingPoolTest(unittest.TestCase):
def testUpAndDown(self):
pool = logging_pool.pool(_POOL_SIZE)
pool.shutdown(wait=True)
with logging_pool.pool(_POOL_SIZE) as pool:
self.assertIsNotNone(pool)
def testTaskExecuted(self):
test_list = []
with logging_pool.pool(_POOL_SIZE) as pool:
pool.submit(lambda : test_list.append(object())).result()
self.assertTrue(test_list)
def testException(self):
with logging_pool.pool(_POOL_SIZE) as pool:
raised_exception = pool.submit(lambda : 1 / 0).exception()
self.assertIsNotNone(raised_exception)
def testCallableObjectExecuted(self):
callable_object = _CallableObject()
passed_object = object()
with logging_pool.pool(_POOL_SIZE) as pool:
future = pool.submit(callable_object, passed_object)
self.assertIsNone(future.result())
self.assertSequenceEqual((passed_object,), callable_object.
passed_values())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _CallableObject(object):
def __init__(self):
self._lock = threading.Lock()
self._passed_values = []
def __call__(self, value):
with self._lock:
self._passed_values.append(value)
def passed_values(self):
with self._lock:
return tuple(self._passed_values)
class LoggingPoolTest(unittest.TestCase):
def testUpAndDown(self):
pool = logging_pool.pool(_POOL_SIZE)
pool.shutdown(wait=True)
with logging_pool.pool(_POOL_SIZE) as pool:
self.assertIsNotNone(pool)
def testTaskExecuted(self):
test_list = []
with logging_pool.pool(_POOL_SIZE) as pool:
pool.submit(lambda : test_list.append(object())).result()
self.assertTrue(test_list)
def testException(self):
with logging_pool.pool(_POOL_SIZE) as pool:
raised_exception = pool.submit(lambda : 1 / 0).exception()
self.assertIsNotNone(raised_exception)
def testCallableObjectExecuted(self):
callable_object = _CallableObject()
passed_object = object()
with logging_pool.pool(_POOL_SIZE) as pool:
future = pool.submit(callable_object, passed_object)
self.assertIsNone(future.result())
self.assertSequenceEqual((passed_object,), callable_object.
passed_values())
if __name__ == '__main__':
unittest.main(verbosity=2)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
_POOL_SIZE = 16
class _CallableObject(object):
def __init__(self):
self._lock = threading.Lock()
self._passed_values = []
def __call__(self, value):
with self._lock:
self._passed_values.append(value)
def passed_values(self):
with self._lock:
return tuple(self._passed_values)
class LoggingPoolTest(unittest.TestCase):
def testUpAndDown(self):
pool = logging_pool.pool(_POOL_SIZE)
pool.shutdown(wait=True)
with logging_pool.pool(_POOL_SIZE) as pool:
self.assertIsNotNone(pool)
def testTaskExecuted(self):
test_list = []
with logging_pool.pool(_POOL_SIZE) as pool:
pool.submit(lambda : test_list.append(object())).result()
self.assertTrue(test_list)
def testException(self):
with logging_pool.pool(_POOL_SIZE) as pool:
raised_exception = pool.submit(lambda : 1 / 0).exception()
self.assertIsNotNone(raised_exception)
def testCallableObjectExecuted(self):
callable_object = _CallableObject()
passed_object = object()
with logging_pool.pool(_POOL_SIZE) as pool:
future = pool.submit(callable_object, passed_object)
self.assertIsNone(future.result())
self.assertSequenceEqual((passed_object,), callable_object.
passed_values())
if __name__ == '__main__':
unittest.main(verbosity=2)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import threading
import unittest
from grpc.framework.foundation import logging_pool
_POOL_SIZE = 16
class _CallableObject(object):
def __init__(self):
self._lock = threading.Lock()
self._passed_values = []
def __call__(self, value):
with self._lock:
self._passed_values.append(value)
def passed_values(self):
with self._lock:
return tuple(self._passed_values)
class LoggingPoolTest(unittest.TestCase):
def testUpAndDown(self):
pool = logging_pool.pool(_POOL_SIZE)
pool.shutdown(wait=True)
with logging_pool.pool(_POOL_SIZE) as pool:
self.assertIsNotNone(pool)
def testTaskExecuted(self):
test_list = []
with logging_pool.pool(_POOL_SIZE) as pool:
pool.submit(lambda : test_list.append(object())).result()
self.assertTrue(test_list)
def testException(self):
with logging_pool.pool(_POOL_SIZE) as pool:
raised_exception = pool.submit(lambda : 1 / 0).exception()
self.assertIsNotNone(raised_exception)
def testCallableObjectExecuted(self):
callable_object = _CallableObject()
passed_object = object()
with logging_pool.pool(_POOL_SIZE) as pool:
future = pool.submit(callable_object, passed_object)
self.assertIsNone(future.result())
self.assertSequenceEqual((passed_object,), callable_object.
passed_values())
if __name__ == '__main__':
unittest.main(verbosity=2)
<|reserved_special_token_1|>
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for grpc.framework.foundation.logging_pool."""
import threading
import unittest
from grpc.framework.foundation import logging_pool
_POOL_SIZE = 16
class _CallableObject(object):
def __init__(self):
self._lock = threading.Lock()
self._passed_values = []
def __call__(self, value):
with self._lock:
self._passed_values.append(value)
def passed_values(self):
with self._lock:
return tuple(self._passed_values)
class LoggingPoolTest(unittest.TestCase):
def testUpAndDown(self):
pool = logging_pool.pool(_POOL_SIZE)
pool.shutdown(wait=True)
with logging_pool.pool(_POOL_SIZE) as pool:
self.assertIsNotNone(pool)
def testTaskExecuted(self):
test_list = []
with logging_pool.pool(_POOL_SIZE) as pool:
pool.submit(lambda: test_list.append(object())).result()
self.assertTrue(test_list)
def testException(self):
with logging_pool.pool(_POOL_SIZE) as pool:
raised_exception = pool.submit(lambda: 1 / 0).exception()
self.assertIsNotNone(raised_exception)
def testCallableObjectExecuted(self):
callable_object = _CallableObject()
passed_object = object()
with logging_pool.pool(_POOL_SIZE) as pool:
future = pool.submit(callable_object, passed_object)
self.assertIsNone(future.result())
self.assertSequenceEqual(
(passed_object,), callable_object.passed_values()
)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
flexible
|
{
"blob_id": "049950bd4bbf7903218bb8fb3a4c91492d6af17b",
"index": 3252,
"step-1": "<mask token>\n\n\nclass _CallableObject(object):\n\n def __init__(self):\n self._lock = threading.Lock()\n self._passed_values = []\n\n def __call__(self, value):\n with self._lock:\n self._passed_values.append(value)\n <mask token>\n\n\nclass LoggingPoolTest(unittest.TestCase):\n\n def testUpAndDown(self):\n pool = logging_pool.pool(_POOL_SIZE)\n pool.shutdown(wait=True)\n with logging_pool.pool(_POOL_SIZE) as pool:\n self.assertIsNotNone(pool)\n\n def testTaskExecuted(self):\n test_list = []\n with logging_pool.pool(_POOL_SIZE) as pool:\n pool.submit(lambda : test_list.append(object())).result()\n self.assertTrue(test_list)\n\n def testException(self):\n with logging_pool.pool(_POOL_SIZE) as pool:\n raised_exception = pool.submit(lambda : 1 / 0).exception()\n self.assertIsNotNone(raised_exception)\n\n def testCallableObjectExecuted(self):\n callable_object = _CallableObject()\n passed_object = object()\n with logging_pool.pool(_POOL_SIZE) as pool:\n future = pool.submit(callable_object, passed_object)\n self.assertIsNone(future.result())\n self.assertSequenceEqual((passed_object,), callable_object.\n passed_values())\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass _CallableObject(object):\n\n def __init__(self):\n self._lock = threading.Lock()\n self._passed_values = []\n\n def __call__(self, value):\n with self._lock:\n self._passed_values.append(value)\n\n def passed_values(self):\n with self._lock:\n return tuple(self._passed_values)\n\n\nclass LoggingPoolTest(unittest.TestCase):\n\n def testUpAndDown(self):\n pool = logging_pool.pool(_POOL_SIZE)\n pool.shutdown(wait=True)\n with logging_pool.pool(_POOL_SIZE) as pool:\n self.assertIsNotNone(pool)\n\n def testTaskExecuted(self):\n test_list = []\n with logging_pool.pool(_POOL_SIZE) as pool:\n pool.submit(lambda : test_list.append(object())).result()\n self.assertTrue(test_list)\n\n def testException(self):\n with logging_pool.pool(_POOL_SIZE) as pool:\n raised_exception = pool.submit(lambda : 1 / 0).exception()\n self.assertIsNotNone(raised_exception)\n\n def testCallableObjectExecuted(self):\n callable_object = _CallableObject()\n passed_object = object()\n with logging_pool.pool(_POOL_SIZE) as pool:\n future = pool.submit(callable_object, passed_object)\n self.assertIsNone(future.result())\n self.assertSequenceEqual((passed_object,), callable_object.\n passed_values())\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n",
"step-3": "<mask token>\n_POOL_SIZE = 16\n\n\nclass _CallableObject(object):\n\n def __init__(self):\n self._lock = threading.Lock()\n self._passed_values = []\n\n def __call__(self, value):\n with self._lock:\n self._passed_values.append(value)\n\n def passed_values(self):\n with self._lock:\n return tuple(self._passed_values)\n\n\nclass LoggingPoolTest(unittest.TestCase):\n\n def testUpAndDown(self):\n pool = logging_pool.pool(_POOL_SIZE)\n pool.shutdown(wait=True)\n with logging_pool.pool(_POOL_SIZE) as pool:\n self.assertIsNotNone(pool)\n\n def testTaskExecuted(self):\n test_list = []\n with logging_pool.pool(_POOL_SIZE) as pool:\n pool.submit(lambda : test_list.append(object())).result()\n self.assertTrue(test_list)\n\n def testException(self):\n with logging_pool.pool(_POOL_SIZE) as pool:\n raised_exception = pool.submit(lambda : 1 / 0).exception()\n self.assertIsNotNone(raised_exception)\n\n def testCallableObjectExecuted(self):\n callable_object = _CallableObject()\n passed_object = object()\n with logging_pool.pool(_POOL_SIZE) as pool:\n future = pool.submit(callable_object, passed_object)\n self.assertIsNone(future.result())\n self.assertSequenceEqual((passed_object,), callable_object.\n passed_values())\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n",
"step-4": "<mask token>\nimport threading\nimport unittest\nfrom grpc.framework.foundation import logging_pool\n_POOL_SIZE = 16\n\n\nclass _CallableObject(object):\n\n def __init__(self):\n self._lock = threading.Lock()\n self._passed_values = []\n\n def __call__(self, value):\n with self._lock:\n self._passed_values.append(value)\n\n def passed_values(self):\n with self._lock:\n return tuple(self._passed_values)\n\n\nclass LoggingPoolTest(unittest.TestCase):\n\n def testUpAndDown(self):\n pool = logging_pool.pool(_POOL_SIZE)\n pool.shutdown(wait=True)\n with logging_pool.pool(_POOL_SIZE) as pool:\n self.assertIsNotNone(pool)\n\n def testTaskExecuted(self):\n test_list = []\n with logging_pool.pool(_POOL_SIZE) as pool:\n pool.submit(lambda : test_list.append(object())).result()\n self.assertTrue(test_list)\n\n def testException(self):\n with logging_pool.pool(_POOL_SIZE) as pool:\n raised_exception = pool.submit(lambda : 1 / 0).exception()\n self.assertIsNotNone(raised_exception)\n\n def testCallableObjectExecuted(self):\n callable_object = _CallableObject()\n passed_object = object()\n with logging_pool.pool(_POOL_SIZE) as pool:\n future = pool.submit(callable_object, passed_object)\n self.assertIsNone(future.result())\n self.assertSequenceEqual((passed_object,), callable_object.\n passed_values())\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n",
"step-5": "# Copyright 2015 gRPC authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for grpc.framework.foundation.logging_pool.\"\"\"\n\nimport threading\nimport unittest\n\nfrom grpc.framework.foundation import logging_pool\n\n_POOL_SIZE = 16\n\n\nclass _CallableObject(object):\n def __init__(self):\n self._lock = threading.Lock()\n self._passed_values = []\n\n def __call__(self, value):\n with self._lock:\n self._passed_values.append(value)\n\n def passed_values(self):\n with self._lock:\n return tuple(self._passed_values)\n\n\nclass LoggingPoolTest(unittest.TestCase):\n def testUpAndDown(self):\n pool = logging_pool.pool(_POOL_SIZE)\n pool.shutdown(wait=True)\n\n with logging_pool.pool(_POOL_SIZE) as pool:\n self.assertIsNotNone(pool)\n\n def testTaskExecuted(self):\n test_list = []\n\n with logging_pool.pool(_POOL_SIZE) as pool:\n pool.submit(lambda: test_list.append(object())).result()\n\n self.assertTrue(test_list)\n\n def testException(self):\n with logging_pool.pool(_POOL_SIZE) as pool:\n raised_exception = pool.submit(lambda: 1 / 0).exception()\n\n self.assertIsNotNone(raised_exception)\n\n def testCallableObjectExecuted(self):\n callable_object = _CallableObject()\n passed_object = object()\n with logging_pool.pool(_POOL_SIZE) as pool:\n future = pool.submit(callable_object, passed_object)\n self.assertIsNone(future.result())\n self.assertSequenceEqual(\n (passed_object,), callable_object.passed_values()\n )\n\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n",
"step-ids": [
8,
10,
11,
12,
13
]
}
|
[
8,
10,
11,
12,
13
] |