code (string, 20-1.05M chars) | apis (sequence) | extract_api (string, 75-5.24M chars) |
---|---|---|
import json
import logging
from typing import Dict, Optional
import requests
from requests.auth import HTTPBasicAuth
from requests.models import Response
log = logging.getLogger(__name__)
class NoGithubCredentials(Exception):
pass
class BasicAuthRequester:
"""
Object used for issuing authenticated API calls.
"""
def __init__(self, username: str, password: str) -> None:
self.username = username
self.password = password
def get_auth(self) -> Optional[HTTPBasicAuth]:
if self.username and self.password:
return HTTPBasicAuth(self.username, self.password)
return None
def get(self, url: str) -> Response:
log.debug("Fetching %s", url)
response = requests.get(url, auth=self.get_auth())
if response.status_code > 400:
log.warning("Error on GET to %s. Response: %s", url, response.content)
return response
def delete(self, url):
log.debug("Deleting %s", url)
return requests.delete(url, auth=self.get_auth())
def post(self, url: str, payload: Dict) -> Response:
log.debug("Posting %s to %s", payload, url)
response = requests.post(url, data=json.dumps(payload), auth=self.get_auth())
if response.status_code > 400:
log.warning("Error on POST to %s. Response: %s", url, response.content)
return response
| [
"logging.getLogger",
"json.dumps",
"requests.auth.HTTPBasicAuth"
] | [((162, 189), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (179, 189), False, 'import logging\n'), ((580, 623), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['self.username', 'self.password'], {}), '(self.username, self.password)\n', (593, 623), False, 'from requests.auth import HTTPBasicAuth\n'), ((1207, 1226), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (1217, 1226), False, 'import json\n')] |
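A quick usage sketch for the requester above; the credentials and URL are placeholders, not part of the original sample:

# Hypothetical usage of BasicAuthRequester (placeholder credentials and URL).
requester = BasicAuthRequester("someuser", "personal-access-token")
response = requester.get("https://api.github.com/user/repos")
if response.ok:
    print(len(response.json()), "repositories")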
#!/usr/bin/env python3
import argparse
import csv
import sys
from pathlib import Path
from typing import Dict, Set, List
from util import utils
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("csv_path", help="Path to function CSV to merge")
args = parser.parse_args()
csv_path = Path(args.csv_path)
known_fn_addrs: Set[int] = {func.addr for func in utils.get_functions(all=True)}
names: Dict[int, str] = {func.addr: func.name for func in utils.get_functions(all=True)}
new_fns: List[utils.FunctionInfo] = []
for func in utils.get_functions(csv_path, all=True):
if func.addr in known_fn_addrs:
if not names[func.addr].startswith("_") and not func.name.startswith("_Z"):
names[func.addr] = func.name
else:
new_fns.append(func)
new_fn_list: List[utils.FunctionInfo] = []
new_fn_list.extend(utils.get_functions(all=True))
new_fn_list.extend(new_fns)
new_fn_list.sort(key=lambda func: func.addr)
# Output the modified function CSV.
writer = csv.writer(sys.stdout, lineterminator="\n")
writer.writerow("Address,Quality,Size,Name".split(","))
for func in new_fn_list:
if func.addr in names:
func.raw_row[3] = names[func.addr]
writer.writerow(func.raw_row)
if __name__ == "__main__":
main()
| [
"csv.writer",
"util.utils.get_functions",
"argparse.ArgumentParser",
"pathlib.Path"
] | [((181, 206), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (204, 206), False, 'import argparse\n'), ((328, 347), 'pathlib.Path', 'Path', (['args.csv_path'], {}), '(args.csv_path)\n', (332, 347), False, 'from pathlib import Path\n'), ((586, 625), 'util.utils.get_functions', 'utils.get_functions', (['csv_path'], {'all': '(True)'}), '(csv_path, all=True)\n', (605, 625), False, 'from util import utils\n'), ((1084, 1127), 'csv.writer', 'csv.writer', (['sys.stdout'], {'lineterminator': '"""\n"""'}), "(sys.stdout, lineterminator='\\n')\n", (1094, 1127), False, 'import csv\n'), ((918, 947), 'util.utils.get_functions', 'utils.get_functions', ([], {'all': '(True)'}), '(all=True)\n', (937, 947), False, 'from util import utils\n'), ((403, 432), 'util.utils.get_functions', 'utils.get_functions', ([], {'all': '(True)'}), '(all=True)\n', (422, 432), False, 'from util import utils\n'), ((496, 525), 'util.utils.get_functions', 'utils.get_functions', ([], {'all': '(True)'}), '(all=True)\n', (515, 525), False, 'from util import utils\n')] |
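The script above leans on util.utils, which is not shown; judging from the attribute accesses, FunctionInfo is assumed to look roughly like this (an inference, not the real module):

# Assumed shape of util.utils.FunctionInfo, inferred from the usages above.
from dataclasses import dataclass
from typing import List

@dataclass
class FunctionInfo:
    addr: int           # function address, used as the merge key
    name: str           # current symbol name
    raw_row: List[str]  # original CSV row: [Address, Quality, Size, Name]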
# Global import
from flask import Flask
# Local Import
from main.routes import v1
server = Flask(__name__)
server.register_blueprint(v1.v1_router)
| [
"flask.Flask"
] | [((93, 108), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (98, 108), False, 'from flask import Flask\n')] |
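v1.v1_router, imported above, is presumably a Flask Blueprint defined in main/routes/v1.py; a minimal sketch of what such a module could contain (route and prefix are illustrative):

# Hypothetical main/routes/v1.py exposing the blueprint registered above.
from flask import Blueprint, jsonify

v1_router = Blueprint("v1", __name__, url_prefix="/v1")

@v1_router.route("/health")
def health():
    return jsonify(status="ok")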
from datetime import datetime, timedelta
from tests.app.domain.models import Batch, OrderLine, allocate
today = datetime.today()
tomorrow = today + timedelta(days=1)
later = tomorrow + timedelta(days=10)
def test_prefers_current_stock_batches_to_shipments() -> None:
in_stock_batch = Batch("in-stock-batch", "RETRO-CLOCK", 100, eta=None)
shipment_batch = Batch("shipment-batch", "RETRO-CLOCK", 100, eta=tomorrow)
line = OrderLine("oref", "RETRO-CLOCK", 10)
allocate(line, [in_stock_batch, shipment_batch])
assert in_stock_batch.available_quantity == 90
assert shipment_batch.available_quantity == 100
def test_prefers_earlier_batches() -> None:
earliest = Batch("speedy-batch", "MINIMALIST-SPOON", 100, eta=today)
medium = Batch("normal-batch", "MINIMALIST-SPOON", 100, eta=tomorrow)
latest = Batch("slow-batch", "MINIMALIST-SPOON", 100, eta=later)
line = OrderLine("order1", "MINIMALIST-SPOON", 10)
allocate(line, [medium, earliest, latest])
assert earliest.available_quantity == 90
assert medium.available_quantity == 100
assert latest.available_quantity == 100
def test_returns_allocated_batch_ref() -> None:
in_stock_batch = Batch("in-stock-batch-ref", "HIGHBROW-POSTER", 100, eta=None)
shipment_batch = Batch("shipment-batch-ref", "HIGHBROW-POSTER", 100, eta=tomorrow)
line = OrderLine("oref", "HIGHBROW-POSTER", 10)
allocation = allocate(line, [in_stock_batch, shipment_batch])
assert allocation == in_stock_batch.reference
| [
"tests.app.domain.models.OrderLine",
"tests.app.domain.models.Batch",
"tests.app.domain.models.allocate",
"datetime.datetime.today",
"datetime.timedelta"
] | [((114, 130), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (128, 130), False, 'from datetime import datetime, timedelta\n'), ((150, 167), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (159, 167), False, 'from datetime import datetime, timedelta\n'), ((187, 205), 'datetime.timedelta', 'timedelta', ([], {'days': '(10)'}), '(days=10)\n', (196, 205), False, 'from datetime import datetime, timedelta\n'), ((292, 345), 'tests.app.domain.models.Batch', 'Batch', (['"""in-stock-batch"""', '"""RETRO-CLOCK"""', '(100)'], {'eta': 'None'}), "('in-stock-batch', 'RETRO-CLOCK', 100, eta=None)\n", (297, 345), False, 'from tests.app.domain.models import Batch, OrderLine, allocate\n'), ((367, 424), 'tests.app.domain.models.Batch', 'Batch', (['"""shipment-batch"""', '"""RETRO-CLOCK"""', '(100)'], {'eta': 'tomorrow'}), "('shipment-batch', 'RETRO-CLOCK', 100, eta=tomorrow)\n", (372, 424), False, 'from tests.app.domain.models import Batch, OrderLine, allocate\n'), ((436, 472), 'tests.app.domain.models.OrderLine', 'OrderLine', (['"""oref"""', '"""RETRO-CLOCK"""', '(10)'], {}), "('oref', 'RETRO-CLOCK', 10)\n", (445, 472), False, 'from tests.app.domain.models import Batch, OrderLine, allocate\n'), ((478, 526), 'tests.app.domain.models.allocate', 'allocate', (['line', '[in_stock_batch, shipment_batch]'], {}), '(line, [in_stock_batch, shipment_batch])\n', (486, 526), False, 'from tests.app.domain.models import Batch, OrderLine, allocate\n'), ((692, 749), 'tests.app.domain.models.Batch', 'Batch', (['"""speedy-batch"""', '"""MINIMALIST-SPOON"""', '(100)'], {'eta': 'today'}), "('speedy-batch', 'MINIMALIST-SPOON', 100, eta=today)\n", (697, 749), False, 'from tests.app.domain.models import Batch, OrderLine, allocate\n'), ((763, 823), 'tests.app.domain.models.Batch', 'Batch', (['"""normal-batch"""', '"""MINIMALIST-SPOON"""', '(100)'], {'eta': 'tomorrow'}), "('normal-batch', 'MINIMALIST-SPOON', 100, eta=tomorrow)\n", (768, 823), False, 'from tests.app.domain.models import Batch, OrderLine, allocate\n'), ((837, 892), 'tests.app.domain.models.Batch', 'Batch', (['"""slow-batch"""', '"""MINIMALIST-SPOON"""', '(100)'], {'eta': 'later'}), "('slow-batch', 'MINIMALIST-SPOON', 100, eta=later)\n", (842, 892), False, 'from tests.app.domain.models import Batch, OrderLine, allocate\n'), ((904, 947), 'tests.app.domain.models.OrderLine', 'OrderLine', (['"""order1"""', '"""MINIMALIST-SPOON"""', '(10)'], {}), "('order1', 'MINIMALIST-SPOON', 10)\n", (913, 947), False, 'from tests.app.domain.models import Batch, OrderLine, allocate\n'), ((953, 995), 'tests.app.domain.models.allocate', 'allocate', (['line', '[medium, earliest, latest]'], {}), '(line, [medium, earliest, latest])\n', (961, 995), False, 'from tests.app.domain.models import Batch, OrderLine, allocate\n'), ((1201, 1262), 'tests.app.domain.models.Batch', 'Batch', (['"""in-stock-batch-ref"""', '"""HIGHBROW-POSTER"""', '(100)'], {'eta': 'None'}), "('in-stock-batch-ref', 'HIGHBROW-POSTER', 100, eta=None)\n", (1206, 1262), False, 'from tests.app.domain.models import Batch, OrderLine, allocate\n'), ((1284, 1349), 'tests.app.domain.models.Batch', 'Batch', (['"""shipment-batch-ref"""', '"""HIGHBROW-POSTER"""', '(100)'], {'eta': 'tomorrow'}), "('shipment-batch-ref', 'HIGHBROW-POSTER', 100, eta=tomorrow)\n", (1289, 1349), False, 'from tests.app.domain.models import Batch, OrderLine, allocate\n'), ((1361, 1401), 'tests.app.domain.models.OrderLine', 'OrderLine', (['"""oref"""', '"""HIGHBROW-POSTER"""', '(10)'], {}), "('oref', 'HIGHBROW-POSTER', 10)\n", (1370, 1401), False, 'from tests.app.domain.models import Batch, OrderLine, allocate\n'), ((1419, 1467), 'tests.app.domain.models.allocate', 'allocate', (['line', '[in_stock_batch, shipment_batch]'], {}), '(line, [in_stock_batch, shipment_batch])\n', (1427, 1467), False, 'from tests.app.domain.models import Batch, OrderLine, allocate\n')] |
from CONSTANTS import ERR_CODE_COMMAND_LINE_ARGS, WARNING_CODE_INVALID_FORMAT, ERR_CODE_NON_EXISTING_DIRECTORY, \
WARNING_CODE_NO_PAIR_FOUND, ERR_CODE_CREATING_XML, TMX_MIN_ARGS
from utilities.error_manager import run_warning, run_error
from utilities.file_handler import get_file_content_safe, get_output_file_name
from processors.tmx_xml_vtt_handler import create_tmx_from_dicts
from utilities import printing_utilities as pu
import time
import os
def _create_dict(lines):
"""
Creates formatted dictionaries for processing and creating .tmx files.
The form of the dictionary is {timestamp: (paragraph_id, text), ...}
Arguments:
lines: lines of text from a .srt file
Returns:
Dictionary that is formatted for processing and creating .tmx files.
"""
ret = {}
idx = 0
# -- Idx specifies one line to process
while idx < len(lines):
# -- Skip any blank lines
if lines[idx].strip() == '':
idx += 1
continue
# -- Get the id of this line
# -- The id is just the number from a .srt file paragraph
local_id = lines[idx].strip()
idx += 1
# -- Get the timestamp from the paragraph
timestamp = lines[idx].strip()
idx += 1
# -- Fetch all content until the first empty line
content = ""
while len(lines) > idx and lines[idx].strip() != '':
content += lines[idx]
idx += 1
# -- If anything that was parsed is actually blank, toss it and
# inform the user of the warning
if local_id.strip() == '' or timestamp.strip() == '' or \
content.strip() == '':
run_warning(WARNING_CODE_INVALID_FORMAT,
[local_id, timestamp, content])
continue
# -- Add the (id, content) pair on the key of the timestamp because
# it is the only thing that is actually unique
ret[timestamp] = (local_id, content)
idx += 1
return ret
def _process_srt_by_dir(directory_path, src_lang):
"""
Essentially processes .srt files in the same way as _full_srt_process, but
takes an entire directory as the input. Files are taken as pairs, so that
pairs have the same name except the postfix of the filename (not the
extension). The postfix of exactly one of the pairs must be the same as the
src_lang parameter eg. filename-one-EN.srt, filename-one-DE.srt, for a
src_lang parameter of either EN or DE.
Arguments:
directory_path: The directory from which to pull .srt file pairs
src_lang: The language of the source file, and also the postfix of
exactly one of the file pairs
Returns:
None
"""
if not os.path.exists(directory_path):
run_error(ERR_CODE_NON_EXISTING_DIRECTORY, directory_path)
# -- Transform the src_lang to lower at once so that no problems are caused
# later due to casing
src_lang = src_lang.lower()
# -- Filename - {source-file, source-lang, trans-file, trans-lang}
files_to_translate = {}
# -- Create {"path1":"path1", lang1:"lang1",
# "path2":"path2", "lang2":"lang2"} dicts for
# each unique filename with it's trailing
# language sliced off.
for item in os.listdir(directory_path):
filename = item.split(".srt")[0][:-3]
if filename not in files_to_translate:
files_to_translate[filename] = {}
t = {}
file_lang = item.split(".srt")[0][-2:].lower()
if file_lang == src_lang:
path_key = 'path1'
lang_key = 'lang1'
else:
path_key = 'path2'
lang_key = 'lang2'
t[path_key] = os.path.join(directory_path, item)
t[lang_key] = file_lang
files_to_translate[filename].update(t)
files = []
for k, v in files_to_translate.items():
# Check if any files or paths are found by themselves, and raise a
# warning
if not ('path1' in v and
'lang1' in v and
'path2' in v and
'lang2' in v):
run_warning(WARNING_CODE_NO_PAIR_FOUND, k)
continue
# -- If all is well, forward items to the _full_srt_process function
# with predetermined filename
path1 = v['path1']
lang1 = v['lang1']
path2 = v['path2']
lang2 = v['lang2']
filepath = \
_full_srt_process(path1, lang1, path2, lang2, str(k) + ".tmx")
if filepath is not None:
files.append(filepath)
return files
def process_srt(arguments):
"""
Processes arguments for creating tmx files.
Arguments:
arguments: arguments from the command line which must have either 4 or
2 args. The arguments can be seen in the help_script section of the
project.
"""
if arguments is None or len(arguments) not in TMX_MIN_ARGS:
run_error(ERR_CODE_COMMAND_LINE_ARGS)
if len(arguments) == 4:
return _full_srt_process(
arguments[0],
arguments[1],
arguments[2],
arguments[3])
else:
return _process_srt_by_dir(arguments[0], arguments[1])
def _full_srt_process(path1, lang1, path2, lang2, result_name=None):
"""
    Creates a .tmx file based on .srt files specified by path1, and path2,
taking into account the languages specified by lang1 and lang2.
Arguments:
path1: path to the source .srt file
lang1: language of the source .srt file
path2: path to the translation .srt file
lang2: language of the translation .srt file
result_name: (optional) name of the resulting .tmx file. If left blank
the name will be generated based on the current time
Returns:
None
"""
# -- Check both files existence and read all content
pu.display_message('#1 Searching for files and '
'scanning all lines in files ...')
lines_1 = get_file_content_safe(path1)
lines_2 = get_file_content_safe(path2)
pu.display_message('#1 ... Files found and all lines '
'in both files were read!\n')
# -- Create dictionaries from all scanned lines in files
pu.display_message('#2 Creating dictionaries from scanned lines ...')
dict_1 = _create_dict(lines_1)
dict_2 = _create_dict(lines_2)
pu.display_message('#2 ... Created dictionaries from scanned lines!\n')
# -- Create xml file
# -- Create full filepath
pu.display_message("#3 Generating tmx file ...")
if result_name is None:
xml_filename = str(time.time()) + '.tmx'
else:
xml_filename = result_name
full_file_path = get_output_file_name(xml_filename)
pu.display_message("File will be generated in: [" + full_file_path + "]")
# -- Create root
xml_string = create_tmx_from_dicts(dict_1, lang1, dict_2, lang2)
try:
with open(full_file_path, mode='w') as result_file:
result_file.write(xml_string)
except Exception as e:
run_error(ERR_CODE_CREATING_XML, full_file_path, e)
return None
pu.display_message("#3 ... File " + full_file_path + " generated!\n")
return full_file_path
| [
"processors.tmx_xml_vtt_handler.create_tmx_from_dicts",
"os.path.exists",
"os.listdir",
"utilities.error_manager.run_error",
"os.path.join",
"utilities.file_handler.get_file_content_safe",
"utilities.file_handler.get_output_file_name",
"utilities.error_manager.run_warning",
"utilities.printing_utilities.display_message",
"time.time"
] | [((3294, 3320), 'os.listdir', 'os.listdir', (['directory_path'], {}), '(directory_path)\n', (3304, 3320), False, 'import os\n'), ((5905, 5990), 'utilities.printing_utilities.display_message', 'pu.display_message', (['"""#1 Searching for files and scanning all lines in files ..."""'], {}), "('#1 Searching for files and scanning all lines in files ...'\n )\n", (5923, 5990), True, 'from utilities import printing_utilities as pu\n'), ((6026, 6054), 'utilities.file_handler.get_file_content_safe', 'get_file_content_safe', (['path1'], {}), '(path1)\n', (6047, 6054), False, 'from utilities.file_handler import get_file_content_safe, get_output_file_name\n'), ((6069, 6097), 'utilities.file_handler.get_file_content_safe', 'get_file_content_safe', (['path2'], {}), '(path2)\n', (6090, 6097), False, 'from utilities.file_handler import get_file_content_safe, get_output_file_name\n'), ((6102, 6188), 'utilities.printing_utilities.display_message', 'pu.display_message', (['"""#1 ... Files found and all lines in both files were read!\n"""'], {}), "(\n '#1 ... Files found and all lines in both files were read!\\n')\n", (6120, 6188), True, 'from utilities import printing_utilities as pu\n'), ((6276, 6345), 'utilities.printing_utilities.display_message', 'pu.display_message', (['"""#2 Creating dictionaries from scanned lines ..."""'], {}), "('#2 Creating dictionaries from scanned lines ...')\n", (6294, 6345), True, 'from utilities import printing_utilities as pu\n'), ((6420, 6491), 'utilities.printing_utilities.display_message', 'pu.display_message', (['"""#2 ... Created dictionaries from scanned lines!\n"""'], {}), "('#2 ... Created dictionaries from scanned lines!\\n')\n", (6438, 6491), True, 'from utilities import printing_utilities as pu\n'), ((6552, 6600), 'utilities.printing_utilities.display_message', 'pu.display_message', (['"""#3 Generating tmx file ..."""'], {}), "('#3 Generating tmx file ...')\n", (6570, 6600), True, 'from utilities import printing_utilities as pu\n'), ((6744, 6778), 'utilities.file_handler.get_output_file_name', 'get_output_file_name', (['xml_filename'], {}), '(xml_filename)\n', (6764, 6778), False, 'from utilities.file_handler import get_file_content_safe, get_output_file_name\n'), ((6783, 6856), 'utilities.printing_utilities.display_message', 'pu.display_message', (["('File will be generated in: [' + full_file_path + ']')"], {}), "('File will be generated in: [' + full_file_path + ']')\n", (6801, 6856), True, 'from utilities import printing_utilities as pu\n'), ((6895, 6946), 'processors.tmx_xml_vtt_handler.create_tmx_from_dicts', 'create_tmx_from_dicts', (['dict_1', 'lang1', 'dict_2', 'lang2'], {}), '(dict_1, lang1, dict_2, lang2)\n', (6916, 6946), False, 'from processors.tmx_xml_vtt_handler import create_tmx_from_dicts\n'), ((7169, 7238), 'utilities.printing_utilities.display_message', 'pu.display_message', (["('#3 ... File ' + full_file_path + ' generated!\\n')"], {}), "('#3 ... File ' + full_file_path + ' generated!\\n')\n", (7187, 7238), True, 'from utilities import printing_utilities as pu\n'), ((2767, 2797), 'os.path.exists', 'os.path.exists', (['directory_path'], {}), '(directory_path)\n', (2781, 2797), False, 'import os\n'), ((2807, 2865), 'utilities.error_manager.run_error', 'run_error', (['ERR_CODE_NON_EXISTING_DIRECTORY', 'directory_path'], {}), '(ERR_CODE_NON_EXISTING_DIRECTORY, directory_path)\n', (2816, 2865), False, 'from utilities.error_manager import run_warning, run_error\n'), ((3726, 3760), 'os.path.join', 'os.path.join', (['directory_path', 'item'], {}), '(directory_path, item)\n', (3738, 3760), False, 'import os\n'), ((4959, 4996), 'utilities.error_manager.run_error', 'run_error', (['ERR_CODE_COMMAND_LINE_ARGS'], {}), '(ERR_CODE_COMMAND_LINE_ARGS)\n', (4968, 4996), False, 'from utilities.error_manager import run_warning, run_error\n'), ((1702, 1774), 'utilities.error_manager.run_warning', 'run_warning', (['WARNING_CODE_INVALID_FORMAT', '[local_id, timestamp, content]'], {}), '(WARNING_CODE_INVALID_FORMAT, [local_id, timestamp, content])\n', (1713, 1774), False, 'from utilities.error_manager import run_warning, run_error\n'), ((4136, 4178), 'utilities.error_manager.run_warning', 'run_warning', (['WARNING_CODE_NO_PAIR_FOUND', 'k'], {}), '(WARNING_CODE_NO_PAIR_FOUND, k)\n', (4147, 4178), False, 'from utilities.error_manager import run_warning, run_error\n'), ((7093, 7144), 'utilities.error_manager.run_error', 'run_error', (['ERR_CODE_CREATING_XML', 'full_file_path', 'e'], {}), '(ERR_CODE_CREATING_XML, full_file_path, e)\n', (7102, 7144), False, 'from utilities.error_manager import run_warning, run_error\n'), ((6656, 6667), 'time.time', 'time.time', ([], {}), '()\n', (6665, 6667), False, 'import time\n')] |
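To make the _create_dict contract concrete, here is a hypothetical two-paragraph .srt fragment and the dictionary the function would build from it (example data only):

# Hypothetical .srt input for _create_dict (as a list of lines, e.g. from file.readlines()):
lines = [
    "1\n",
    "00:00:01,000 --> 00:00:03,000\n",
    "Hello there.\n",
    "\n",
    "2\n",
    "00:00:04,000 --> 00:00:06,500\n",
    "How are you?\n",
    "\n",
]
# _create_dict(lines) would return:
# {
#     "00:00:01,000 --> 00:00:03,000": ("1", "Hello there.\n"),
#     "00:00:04,000 --> 00:00:06,500": ("2", "How are you?\n"),
# }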
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Detail(models.Model):
"""用户信息名"""
text = models.CharField(max_length=20)
owner = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
def __str__(self):
"""返回模型的字符串表示"""
return self.text
class Information(models.Model):
"""具体信息"""
title = models.ForeignKey(Detail, on_delete=models.CASCADE, null=True)
text = models.TextField(max_length=50)
class Meta:
verbose_name_plural = 'informations'
def __str__(self):
"""返回模型的字符串表示"""
return self.text | [
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((156, 187), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (172, 187), False, 'from django.db import models\n'), ((200, 260), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE', 'null': '(True)'}), '(User, on_delete=models.CASCADE, null=True)\n', (217, 260), False, 'from django.db import models\n'), ((396, 458), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Detail'], {'on_delete': 'models.CASCADE', 'null': '(True)'}), '(Detail, on_delete=models.CASCADE, null=True)\n', (413, 458), False, 'from django.db import models\n'), ((470, 501), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (486, 501), False, 'from django.db import models\n')] |
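A brief, hypothetical shell session showing how the two models above relate (app path, user and texts are illustrative):

# Hypothetical usage from `python manage.py shell`; replace `main.models` with the real app path.
from django.contrib.auth.models import User
from main.models import Detail, Information

user = User.objects.create_user("alice")
detail = Detail.objects.create(text="Contact info", owner=user)
info = Information.objects.create(title=detail, text="alice@example.com")
print(detail, info)  # prints the `text` of each, via __str__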
import re
from datetime import timedelta
DURATION_PATTERN = re.compile(
r"((?P<days>[-\d]+) day[s]*, )?(?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d[\.\d+]*)"
)
def unique(s):
seen = set()
seen_add = seen.add
return [x for x in s if not (x in seen or seen_add(x))]
def parse_timedelta(s):
m = DURATION_PATTERN.match(s)
d = m.groupdict()
return timedelta(**{k: float(d[k]) for k in d if d[k] is not None})
| [
"re.compile"
] | [((62, 176), 're.compile', 're.compile', (['"""((?P<days>[-\\\\d]+) day[s]*, )?(?P<hours>\\\\d+):(?P<minutes>\\\\d+):(?P<seconds>\\\\d[\\\\.\\\\d+]*)"""'], {}), "(\n '((?P<days>[-\\\\d]+) day[s]*, )?(?P<hours>\\\\d+):(?P<minutes>\\\\d+):(?P<seconds>\\\\d[\\\\.\\\\d+]*)'\n )\n", (72, 176), False, 'import re\n')] |
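A couple of illustrative calls for the two helpers above (values chosen for the example):

# unique() keeps the first occurrence of each item while preserving order.
print(unique([3, 1, 3, 2, 1]))              # [3, 1, 2]
# parse_timedelta() accepts str(timedelta)-style strings, with or without a day part.
print(parse_timedelta("2 days, 3:04:05.5"))  # 2 days, 3:04:05.500000
print(parse_timedelta("0:00:12.25"))         # 0:00:12.250000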
import numpy as np
import math
w1 = np.array([
[-1],
[-1],
], dtype=float)
w2 = np.array([
[0],
[0],
], dtype=float)
w3 = np.array([
[1],
[1],
], dtype=float)
# Initialization and preprocessing
C = 1
m, n = w1.shape
w = np.zeros((m + 1, 3))
print("Initial:\n", w)
add_row = np.ones(n)
w1 = np.row_stack((w1, add_row))
w2 = np.row_stack((w2, add_row))
w3 = np.row_stack((w3, add_row))
# w2 = -1 * w2
w1 = np.matrix(w1)
w2 = np.matrix(w2)
w3 = np.matrix(w3)
w = np.matrix(w)
def interate(idx, wn):
changed = False
d = w[idx] * wn
for i in range(0, w.shape[0]):
cur_w = w[i]
if d <= cur_w * wn and i != idx:
changed = True
w[i] -= C * wn.T
if changed:
w[idx] += C * wn.T
return changed
cnt = 0
while True:
changed = False
changed |= interate(0, w1)
changed |= interate(1, w2)
changed |= interate(2, w3)
cnt += 1
if not changed:
break
print(cnt, "times\n ", w) | [
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.row_stack",
"numpy.matrix"
] | [((37, 72), 'numpy.array', 'np.array', (['[[-1], [-1]]'], {'dtype': 'float'}), '([[-1], [-1]], dtype=float)\n', (45, 72), True, 'import numpy as np\n'), ((90, 123), 'numpy.array', 'np.array', (['[[0], [0]]'], {'dtype': 'float'}), '([[0], [0]], dtype=float)\n', (98, 123), True, 'import numpy as np\n'), ((141, 174), 'numpy.array', 'np.array', (['[[1], [1]]'], {'dtype': 'float'}), '([[1], [1]], dtype=float)\n', (149, 174), True, 'import numpy as np\n'), ((223, 243), 'numpy.zeros', 'np.zeros', (['(m + 1, 3)'], {}), '((m + 1, 3))\n', (231, 243), True, 'import numpy as np\n'), ((278, 288), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (285, 288), True, 'import numpy as np\n'), ((294, 321), 'numpy.row_stack', 'np.row_stack', (['(w1, add_row)'], {}), '((w1, add_row))\n', (306, 321), True, 'import numpy as np\n'), ((327, 354), 'numpy.row_stack', 'np.row_stack', (['(w2, add_row)'], {}), '((w2, add_row))\n', (339, 354), True, 'import numpy as np\n'), ((360, 387), 'numpy.row_stack', 'np.row_stack', (['(w3, add_row)'], {}), '((w3, add_row))\n', (372, 387), True, 'import numpy as np\n'), ((409, 422), 'numpy.matrix', 'np.matrix', (['w1'], {}), '(w1)\n', (418, 422), True, 'import numpy as np\n'), ((428, 441), 'numpy.matrix', 'np.matrix', (['w2'], {}), '(w2)\n', (437, 441), True, 'import numpy as np\n'), ((447, 460), 'numpy.matrix', 'np.matrix', (['w3'], {}), '(w3)\n', (456, 460), True, 'import numpy as np\n'), ((465, 477), 'numpy.matrix', 'np.matrix', (['w'], {}), '(w)\n', (474, 477), True, 'import numpy as np\n')] |
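The loop above is the fixed-increment multi-class perceptron rule: for a sample of class idx with augmented vector wn, every competing class i whose score w[i]*wn is at least as large as w[idx]*wn is penalised by C*wn, and the correct class is reinforced by C*wn. A small sanity check that could be appended (not part of the original):

# After convergence, each sample should score strictly highest for its own class.
for idx, wn in enumerate([w1, w2, w3]):
    scores = w * wn  # 3x1 matrix of per-class scores
    print("sample", idx, "classified as", int(scores.argmax()))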
"""
Tests for languaged-tagged text value rendering.
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2019, <NAME>"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import sys
import os
import unittest
import re
from annalist.resourcetypes import (
file_extension, file_extension_for_content_type
)
from annalist.views.fields.render_text_language import (
get_text_language_renderer,
TextLanguageValueMapper
)
from .field_rendering_support import FieldRendererTestSupport
class TextLanguageRenderingTest(FieldRendererTestSupport):
def setUp(self):
return
def tearDown(self):
return
# Rendering test
def test_RenderTextLanguageValue(self):
def language_text_value(text, lang):
return {'@value': text, '@language': lang} if lang else {'@value': text}
def expect_render(text, lang):
view_text = "%s (%s)"%(text, lang) if lang else text
render_view = '''<span>%s</span>'''%(view_text)
render_edit = (
'''<input type="text" size="64" name="repeat_prefix_test_field" '''+
'''placeholder="(test placeholder)" '''+
'''value="%s" />'''
)%view_text
return {'view': render_view, 'edit': render_edit}
test_values = (
[ ("text-en", "en", "text-en (en)")
, ("text-en", "en", "text-en (en)")
, ("text-nl", "", "text-nl")
, ("text (en) nl", "", "text (en) nl")
])
test_value_context_renders = (
[ (self._make_test_context(render_text), expect_render(text, lang))
for text, lang, render_text in test_values
])
renderer = get_text_language_renderer()
for render_context, expect_render in test_value_context_renders:
# print repr(render_context['field']['field_value'])
self._check_value_renderer_results(
renderer,
context=render_context,
expect_rendered_view=expect_render['view'],
expect_rendered_edit=expect_render['edit'],
collapse_whitespace=True
)
return
def test_DecodeTextLanguageValue(self):
test_decode_values = (
{ None: { "@value": "" }
, "text": { "@value": "text" }
, "text (en)": { "@value": "text", "@language": "en" }
, "text (en) more": { "@value": "text (en) more" }
, "": { "@value": "" }
})
for valtext, expect_valdata in test_decode_values.items():
valdata = TextLanguageValueMapper.decode(valtext)
self.assertEqual(
valdata, expect_valdata,
"Value decode(%s) = %r, expect %r"%(valtext, valdata, expect_valdata)
)
return
# End.
if __name__ == "__main__":
# import django
# django.setup() # Needed for template loader
# Runtests in this module
# runner = unittest.TextTestRunner(verbosity=2)
# tests = unittest.TestSuite()
# tests = getSuite(select=sel)
# if tests: runner.run(tests)
unittest.main()
| [
"logging.getLogger",
"annalist.views.fields.render_text_language.get_text_language_renderer",
"annalist.views.fields.render_text_language.TextLanguageValueMapper.decode",
"unittest.main"
] | [((327, 354), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (344, 354), False, 'import logging\n'), ((3442, 3457), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3455, 3457), False, 'import unittest\n'), ((1961, 1989), 'annalist.views.fields.render_text_language.get_text_language_renderer', 'get_text_language_renderer', ([], {}), '()\n', (1987, 1989), False, 'from annalist.views.fields.render_text_language import get_text_language_renderer, TextLanguageValueMapper\n'), ((2913, 2952), 'annalist.views.fields.render_text_language.TextLanguageValueMapper.decode', 'TextLanguageValueMapper.decode', (['valtext'], {}), '(valtext)\n', (2943, 2952), False, 'from annalist.views.fields.render_text_language import get_text_language_renderer, TextLanguageValueMapper\n')] |
import turtle
#Create Board
def draw_board(Boardsize, squares_per_row):
'''
Initial Function to Create Board, Based on size of board and squares per row
Also creates 3 dictionaries: Dictionary of all the Red Squares and their positions,
Dictionary of all Black Squares and their positions, and a General dictionary of all
Squares and their positions
'''
sc = turtle.Screen()
sc.setup(Boardsize, Boardsize)
pen = turtle.Turtle()
pen.speed(100)
pen.up()
pen.setpos(-Boardsize/2,-Boardsize/2)
pen.down()
def draw_square(Boardsize, squares_per_row):
for _ in range(4):
pen.forward(Boardsize/squares_per_row)
pen.left(90)
pen.forward(Boardsize/squares_per_row)
def draw_circle(Boardsize, squares_per_row, color):
pen.speed(100)
pen.color(color)
pen.down()
pen.begin_fill()
pen.circle(((Boardsize/squares_per_row)/2)-1)
pen.end_fill()
for row in range(1,squares_per_row+1):
for i in range(squares_per_row):
draw_square(Boardsize, squares_per_row)
pen.up()
pen.setpos((-Boardsize/2), ((-Boardsize/2)+row*(Boardsize/squares_per_row)))
pen.down()
pen.up()
pen.setpos((-Boardsize/2)+.5*(Boardsize/squares_per_row), (-Boardsize/2))
#Red's Pieces
Red = list(range(0,2*squares_per_row))
#Black's Pieces
Black = list(range((squares_per_row*squares_per_row)-(2*squares_per_row),squares_per_row*squares_per_row))
Black_Positions = []
Red_Positions = []
for row in range(1, 3):
for i in range(squares_per_row):
a = round(pen.xcor(),1)
b = round(pen.ycor(),1)
t = tuple([a,b,'normal'])
Red_Positions.append(t)
draw_circle(Boardsize, squares_per_row, 'red')
pen.forward(Boardsize/squares_per_row)
pen.up()
pen.setpos((-Boardsize/2)+.5*(Boardsize/squares_per_row), (-Boardsize/2)+row*(Boardsize/squares_per_row))
pen.up()
pen.setpos((-Boardsize/2)+.5*(Boardsize/squares_per_row), (Boardsize/2)-(2*(Boardsize/squares_per_row)))
for row in range(squares_per_row-1,squares_per_row+1):
for i in range(squares_per_row):
a = round(pen.xcor(),1)
b = round(pen.ycor(),1)
t = tuple([a,b,'normal'])
Black_Positions.append(t)
draw_circle(Boardsize, squares_per_row, 'black')
pen.forward(Boardsize/squares_per_row)
pen.up()
pen.setpos((-Boardsize/2)+.5*(Boardsize/squares_per_row), (-Boardsize/2)+row*(Boardsize/squares_per_row))
Total_Squares = list(range(0,squares_per_row*squares_per_row))
Tuple_Position_List = []
starting_x_cord = (-Boardsize/2)+.5*(Boardsize/squares_per_row)
starting_y_cord = (-Boardsize/2)
a = starting_x_cord
b = starting_y_cord
for i in range(0, squares_per_row):
b+= i*(Boardsize/squares_per_row)
for _ in range(squares_per_row):
tup = tuple([a,b])
Tuple_Position_List.append(tup)
a += Boardsize/squares_per_row
a = starting_x_cord
b = starting_y_cord
Position_Dictionary = dict(zip(Total_Squares, Tuple_Position_List))
Final_Black_Dict = dict(zip(Black, Black_Positions))
Final_Red_Dict = dict(zip(Red, Red_Positions))
return Final_Black_Dict, Final_Red_Dict, Position_Dictionary
A = draw_board(800, 8)
Black = A[0]
print(Black)
Red = A[1]
Total = A[2]
print(Red)
def find_open_spots():
'''
Takes in Black and Red Dictionary of checkers as Global Variables,
checks them against overall dictionary, returns open possible positions to move to
'''
Positions = []
for v in Total.values():
Positions.append(v)
for v in Black.values():
x = tuple([v[0], v[1]])
Positions.remove(x)
for v in Red.values():
x = tuple([v[0], v[1]])
Positions.remove(x)
return Positions
print(find_open_spots())
def find_piece_type(Coordinate):
'''
Creates possible spots to move to for a given piece, Use Dictionary of that piece
and the overall dictionary of open spots
'''
#Access Red and Black Dictionaries
Red_Spots = []
Red_Types = []
Black_Spots = []
Black_Types = []
for v in Red.values():
x = tuple([v[0], v[1]])
Red_Spots.append(x)
Red_Types.append(v[2])
for v in Black.values():
x = tuple([v[0], v[1]])
Black_Spots.append(x)
Black_Types.append(v[2])
Red_Dict = dict(zip(Red_Spots, Red_Types))
Black_Dict = dict(zip(Black_Spots, Black_Types))
if Coordinate in Red_Spots:
for k,v in Red_Dict.items():
if Coordinate == k:
return v
if Coordinate in Black_Spots:
for k,v in Black_Dict.items():
if Coordinate == k:
return v
print(find_piece_type((-350.0, -400.0)))
#Have to create function that teaches movement patterns for specific squares and their classification
# Normal pieces can move forward at a left diagonal, forward, forward at a right diagonal...one space at a time
#If a normal piece jumps another piece, it becomes a king
## Kings can move any direction, one space at a time, if it jumps a piece, it becomes a Super King
### Super Kings can move any direction, 1-3 spaces at a time, but only in one consistent direction, once it jumps
# a piece, it becomes a God
#### Gods can move any direction,1-4 spaces at a time, and are able to change their direction each time they
# move, so they can go forward one, forward diagonal, left, then back....etc, or they can just go forward one
#spot, etc
#The idea is to have the human cursors clicking on the starting pieces coordinates first, then moving to a new
#coordinate, and if the piece can move there, it draws the checker in that spot, and erases the initial checker,
#changes the checker's coordinates, and if necessary updates its status or classification
#Going to have a list before every move, based on the player's turn, of all possible open spots
#When player clicks on spot, it identifies the piece, and the type of piece, then when clicks again,
#It checks whether that particular piece can move to that particular spot,
#If so, it draws that same piece in that new spot, and erases it from the previous spot,
#Otherwise, does nothing
#Normal Movement Class Below
#Normal, for Red, check its key, if key % squares_per_row !=0 or key % (squares_per_row ) !=(squares_per_row -1):
# Then it can move to Key+squares_to_win, Key+squares_to_win -1, or Key + squares_to_win +1
#If key % squares_to_win == 0:
#Then it can move to Key + squares_to_win, and Key+squares_to_win+1
#if Key % (squares_per_row) == (squares_per_row -1):
# it can move to Key+ squares_to_win, and Key+squares_to_win - 1
#Normal for Black, check its key, if key % squares_per_row !=0 or key % (squares_per_row !=(squares_per_row -1):
# Then it can move to Key-squares_to_win, Key-squares_to_win -1, or Key - squares_to_win +1
#If key % squares_to_win == 0:
#Then it can move to Key - squares_to_win, and Key-squares_to_win+1
#if Key % (squares_per_row) == (squares_per_row -1) :
# it can move to Key- squares_to_win, and Key-squares_to_win - 1
#King Movement Class Below
#-boardsize/2, -boardsize/2
#Create Checkers, Red/Black
#Also need to give each checker a current position, based on the center of the checker
#It has to move to a spot that another checker is not currently occupying
#So the function needs to create the checkers, and also create a dictionary of the checkers starting positions
#When a piece moves, basically we check the future position, if it is open, we remove the checker from the current
#position by erasing it's current checker, and then draw it at the new position
#List of open positions, just based on the center of the piece, on the board
#List of checkers and their coordinates/status(coordinates of center)...
#Possible statuses... Off, Normal, King, Super-King, God
#So each piece has a number as its Key, and their value is a list of [Coordinate, status]
#For the user, once piece has been clicked, we check it's coordinate and status to see its possible moves
#possible moves are moves it CAN go to that are unoccupied
# user then clicks on spot he wants piece to move to, if unoccupied it moves
# user can unclick piece, and choose new piece
# once piece moves, its status and coordinates are updated
#First player to knock out all other players pieces wins
# Normal pieces can move forward at a left diagonal, forward, forward at a right diagonal...one space at a time
#If a normal piece jumps another piece, it becomes a king
## Kings can move any direction, one space at a time, if it jumps a piece, it becomes a Super King
### Super Kings can move any direction, 1-3 spaces at a time, but only in one consistent direction, once it jumps
# a piece, it becomes a God
#### Gods can move any direction,1-4 spaces at a time, and are able to change their direction each time they
# move, so they can go forward one, forward diagonal, left, then back....etc, or they can just go forward one
#spot, etc
#TODO Create starting checkers, their coordinates, and create the dictionary that stores them, so function
# will both map and draw the different colored checkers, and it will return a dictionary for each circle
#Then we need a movement function, finds possible positions of each piece, returns an updated dictionary
# based on movement of particular piece, and its interaction with the board and other pieces, so this function
# will move one piece, and return updated dictionary
# And that will be the game, it will continue until players quit, or ALL pieces from one color have their
# status as "OFF", then game will close
#As statuses of pieces change, so will their colors
#King = Blue
#Super King = Green
#God = Orange
turtle.done() | [
"turtle.Screen",
"turtle.done",
"turtle.Turtle"
] | [((10200, 10213), 'turtle.done', 'turtle.done', ([], {}), '()\n', (10211, 10213), False, 'import turtle\n'), ((404, 419), 'turtle.Screen', 'turtle.Screen', ([], {}), '()\n', (417, 419), False, 'import turtle\n'), ((465, 480), 'turtle.Turtle', 'turtle.Turtle', ([], {}), '()\n', (478, 480), False, 'import turtle\n')] |
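As a concrete illustration of the "Normal" movement rule described in the comment block above, here is a hypothetical helper (not part of the script) that lists candidate destination square keys for a Normal piece, handling the board edges the same way the comments do:

# Hypothetical sketch of the Normal-piece rule from the comments above:
# forward, forward-left and forward-right, one square at a time, minding the edges.
def normal_piece_candidates(key, squares_per_row, color):
    step = squares_per_row if color == "red" else -squares_per_row
    candidates = [key + step]                            # straight forward
    if key % squares_per_row != 0:                       # not on the left edge
        candidates.append(key + step - 1)                # forward-left diagonal
    if key % squares_per_row != squares_per_row - 1:     # not on the right edge
        candidates.append(key + step + 1)                # forward-right diagonal
    return [c for c in candidates if 0 <= c < squares_per_row * squares_per_row]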
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dialogflow TestCase API interactions."""
import time
import dialogflow_sample as ds
import google.api_core.exceptions
import google.auth
import google.cloud.dialogflowcx as cx
from .client_delegator import ClientDelegator
class DialogflowTestCaseFailure(Exception):
"""Exception to raise when a test case fails"""
class TestCaseDelegator(ClientDelegator):
"""Class for organizing interactions with the Dialogflow TestCases API."""
_CLIENT_CLASS = cx.TestCasesClient
def __init__(self, controller: ds.DialogflowSample, **kwargs) -> None:
self._is_webhook_enabled = kwargs.pop("is_webhook_enabled", False)
self._conversation_turns = kwargs.pop("conversation_turns")
self.expected_exception = kwargs.pop("expected_exception", None)
self._test_case = None
super().__init__(controller, **kwargs)
@property
def test_case(self):
"""Test Case set in Dialogflow."""
if not self._test_case:
raise RuntimeError("Test Case not yet created")
return self._test_case
def setup(self):
"""Initializes the test cases delegator."""
try:
self._test_case = self.client.create_test_case(
parent=self.controller.agent_delegator.agent.name,
test_case=cx.TestCase(
display_name=self.display_name,
test_case_conversation_turns=[
t.get_conversation_turn(self._is_webhook_enabled)
for t in self._conversation_turns
],
test_config=cx.TestConfig(flow=self.controller.start_flow),
),
)
except google.api_core.exceptions.AlreadyExists:
request = cx.ListTestCasesRequest(parent=self.parent)
for curr_test_case in self.client.list_test_cases(request=request):
if curr_test_case.display_name == self.display_name:
request = cx.GetTestCaseRequest(
name=curr_test_case.name,
)
self._test_case = self.client.get_test_case(request=request)
return
def tear_down(self):
"""Destroys the test case."""
request = cx.BatchDeleteTestCasesRequest(
parent=self.parent,
names=[self.test_case.name],
)
try:
self.client.batch_delete_test_cases(request=request)
self._test_case = None
except google.api_core.exceptions.NotFound:
pass
def run_test_case(self, wait=10, max_retries=3):
"""Runs the test case."""
retry_count = 0
result = None
while retry_count < max_retries:
time.sleep(wait)
lro = self.client.run_test_case(
request=cx.RunTestCaseRequest(name=self.test_case.name)
)
while lro.running():
try:
result = lro.result().result
agent_response_differences = [
conversation_turn.virtual_agent_output.differences
for conversation_turn in result.conversation_turns
]
test_case_fail = result.test_result != cx.TestResult.PASSED
if any(agent_response_differences) or test_case_fail:
raise DialogflowTestCaseFailure(
f'Test "{self.test_case.display_name}" failed'
)
return
except google.api_core.exceptions.NotFound as exc:
if str(exc) == (
"404 com.google.apps.framework.request.NotFoundException: "
"NLU model for flow '00000000-0000-0000-0000-000000000000' does not exist. "
"Please try again after retraining the flow."
):
retry_count += 1
raise RuntimeError(f"Retry count exceeded: {retry_count}")
| [
"google.cloud.dialogflowcx.RunTestCaseRequest",
"google.cloud.dialogflowcx.BatchDeleteTestCasesRequest",
"google.cloud.dialogflowcx.GetTestCaseRequest",
"time.sleep",
"google.cloud.dialogflowcx.ListTestCasesRequest",
"google.cloud.dialogflowcx.TestConfig"
] | [((2852, 2931), 'google.cloud.dialogflowcx.BatchDeleteTestCasesRequest', 'cx.BatchDeleteTestCasesRequest', ([], {'parent': 'self.parent', 'names': '[self.test_case.name]'}), '(parent=self.parent, names=[self.test_case.name])\n', (2882, 2931), True, 'import google.cloud.dialogflowcx as cx\n'), ((3336, 3352), 'time.sleep', 'time.sleep', (['wait'], {}), '(wait)\n', (3346, 3352), False, 'import time\n'), ((2344, 2387), 'google.cloud.dialogflowcx.ListTestCasesRequest', 'cx.ListTestCasesRequest', ([], {'parent': 'self.parent'}), '(parent=self.parent)\n', (2367, 2387), True, 'import google.cloud.dialogflowcx as cx\n'), ((3422, 3469), 'google.cloud.dialogflowcx.RunTestCaseRequest', 'cx.RunTestCaseRequest', ([], {'name': 'self.test_case.name'}), '(name=self.test_case.name)\n', (3443, 3469), True, 'import google.cloud.dialogflowcx as cx\n'), ((2567, 2614), 'google.cloud.dialogflowcx.GetTestCaseRequest', 'cx.GetTestCaseRequest', ([], {'name': 'curr_test_case.name'}), '(name=curr_test_case.name)\n', (2588, 2614), True, 'import google.cloud.dialogflowcx as cx\n'), ((2184, 2230), 'google.cloud.dialogflowcx.TestConfig', 'cx.TestConfig', ([], {'flow': 'self.controller.start_flow'}), '(flow=self.controller.start_flow)\n', (2197, 2230), True, 'import google.cloud.dialogflowcx as cx\n')] |
#!/usr/bin/python
import os
import re
import sys
import getpass
import unicodedata
import copy
from os import stat
from pwd import getpwuid
from .colors import *
from . import log
if __name__ == "__main__":
logger = log.gen(mode="dev")
else:
logger = log.gen()
__author__ = '<NAME>'
# important constants:
ENCODING_INPUT_FILES = "utf8"
COCCURRENCES = PURPLE
CFILE_PATHS = BLUE
CTEXT_FILES = WHITE
# indicator strings
ASK_CONFIRMATION_INDICATORS_STRINGS = ["--ask_confirmation", "--ask"]
CASE_SENSITIVE_INDICATORS_STRINGS = ["--case_sensitive", "--case_respect"]
INITIAL_STRING_INDICATORS_STRINGS = ["--init_str", "--initial", "--init"]
DESTINATION_STRING_INDICATORS_STRINGS = ["--dest_str", "--destination", "--dest"]
DIRECTORY_PATH_TO_APPLY_INDICATORS_STRINGS = ["--directory_path", "--dirpath", "--path"]
LIST_FILES_PATHS_TO_APPLY_INDICATORS_STRINGS = ["--file_paths_list", "--file_paths"]
FILENAME_MUST_END_BY_INDICATORS_STRINGS = ["--file_name_must_end_by", "--end_by"]
NO_ASK_CONFIRMATION_INDICATORS_STRINGS = ["--no_ask_confirmation", "--no_ask"]
CASE_INSENSITIVE_INDICATORS_STRINGS = ["--case_insensitive", "--no_case_respect"]
BLACK_LIST_EXTENSIONS_INDICATORS_STRINGS = ["--extension_filter", "--no_all_extensions"]
NO_BLACK_LIST_EXTENSIONS_INDICATORS_STRINGS = ["--all_extensions", "--no_extension_filter"]
ADD_EXCLUDED_EXTENSIONS_INDICATORS_STRINGS = ["--add_excluded_extensions", "--filter_extensions"]
ADD_EXCLUDED_STRINGS_INDICATORS_STRINGS = ["--add_excluded_strings", "--filter_strings"]
EXCLUDED_PATHS_INDICATORS_STRINGS = ["--excluded_paths"]
NO_BINARY_INDICATORS_STRINGS = ["--binary_exclusion", "--no_binary"]
BINARY_INDICATORS_STRINGS = ["--binary_accepted", "--no_binary_exclusion", "--binary"]
NO_SYMLINK_INDICATORS_STRINGS = ["--symlink_exclusion", "--no_symlink"]
SYMLINK_INDICATORS_STRINGS = ["--symlink_accepted", "--no_symlink_exclusion", "--symlink"]
END_INDICATORS_STRINGS = ["end_last_param", "--end_param", "--end"]
# black list extensions
AUDIO_EXTENSIONS = ["mp3", "MP3", "wav", "WAV", "m4a", "M4A", "aac", "AAC", "mp1", "MP1", "mp2", "MP2", "mpg", "MPG",
"flac", "FLAC"]
IMAGE_EXTENSIONS = ["jpg", "JPG", "jpeg", "JPEG", "png", "PNG", "tif", "TIF", "gif", "GIF", "bmp", "BMP", "pjpeg",
"PJPEG"]
VIDEO_EXTENSIONS = ["mp4", "MP4", "mpeg", "MPEG", "avi", "AVI", "wma", "WMA", "ogg", "OGG", "quicktime", "QUICKTIME",
"webm", "WEBM", "mp2t", "MP2T", "flv", "FLV", "mov", "MOV", "webm", "WEBM", "mkv", "MKV"]
PROGRAMMING_EXTENSIONS = ["class", "CLASS"]
BLACK_LIST_EXTENSIONS_LIST = AUDIO_EXTENSIONS + IMAGE_EXTENSIONS + VIDEO_EXTENSIONS + PROGRAMMING_EXTENSIONS
# supported short indicators
SUPPORTED_SHORT_INDICATORS = ['l', 'r', 's', 'a', 'c']
found_nb = 0
replaced_nb = 0
def _help_requested(arguments):
if len(arguments) == 1 and (arguments[0] == "-h" or arguments[0] == "--help"):
README_path = "/usr/lib/replace/README.md"
f = open(README_path, 'r')
print(CFILE_PATHS + "\n\t####### replace documentation #######\n" + WHITE)
for line in f:
if line == "```sh\n" or line == "```\n" or line == "<pre>\n" or line == "</pre>\n":
continue
line = line.replace('```sh', '').replace('```', '').replace('<pre>', '').replace('</b>', ''). \
replace('<b>', '').replace('<!-- -->', '').replace('<br/>', '').replace('```sh', ''). \
replace('***', '').replace('***', '').replace('**', '').replace('*', '')
print(" " + line, end='')
print(BASE_C)
exit()
def _check_input_args(args):
if len(args) < 2:
logger.error("not enough arguments, needs at least the initial string and the destination string\n\tneeds the "
"replace mode (-l -r or -s), the initial string, the destination string and the path to process"
"\n\ta typical example would be: " + WHITE + "replace -l dog cat ." + BASE_C)
exit(1)
def _init_indicators():
file_name_must_end_by = []
local = True # default
recursive = False
specific = False
ask_replace = True # default
case_sensitive = True # default
black_list_extensions = True # default
binary_accepted = False # default
symlink_accepted = False # default
excluded_strings = []
excluded_extensions = []
excluded_paths = []
return file_name_must_end_by, local, recursive, specific, ask_replace, case_sensitive, black_list_extensions, + \
binary_accepted, symlink_accepted, excluded_strings, excluded_extensions, excluded_paths
def _init_args():
init_str = None
dest_str = None
dir_path_to_apply = None
file_paths_to_apply = []
return init_str, dest_str, dir_path_to_apply, file_paths_to_apply
def _treat_input_args(input_args):
file_name_must_end_by, local, recursive, specific, ask_replace, case_sensitive, \
black_list_extensions, binary_accepted, symlink_accepted, excluded_strings, \
excluded_extensions, excluded_paths = _init_indicators()
init_str, dest_str, dir_path_to_apply, file_paths_to_apply = _init_args()
nb_args = len(input_args)
not_compatible_shorts_indicators = []
args_not_used_indexes = list(range(nb_args))
for arg_index, arg in enumerate(input_args):
if arg.startswith("--"):
if arg in FILENAME_MUST_END_BY_INDICATORS_STRINGS + INITIAL_STRING_INDICATORS_STRINGS + \
DESTINATION_STRING_INDICATORS_STRINGS + DIRECTORY_PATH_TO_APPLY_INDICATORS_STRINGS + \
LIST_FILES_PATHS_TO_APPLY_INDICATORS_STRINGS + ASK_CONFIRMATION_INDICATORS_STRINGS + \
NO_ASK_CONFIRMATION_INDICATORS_STRINGS + CASE_SENSITIVE_INDICATORS_STRINGS + \
CASE_INSENSITIVE_INDICATORS_STRINGS + NO_BLACK_LIST_EXTENSIONS_INDICATORS_STRINGS + \
BLACK_LIST_EXTENSIONS_INDICATORS_STRINGS + ADD_EXCLUDED_EXTENSIONS_INDICATORS_STRINGS + \
EXCLUDED_PATHS_INDICATORS_STRINGS + ADD_EXCLUDED_STRINGS_INDICATORS_STRINGS + \
BINARY_INDICATORS_STRINGS + NO_BINARY_INDICATORS_STRINGS + \
SYMLINK_INDICATORS_STRINGS + NO_SYMLINK_INDICATORS_STRINGS + END_INDICATORS_STRINGS:
if arg in ASK_CONFIRMATION_INDICATORS_STRINGS:
ask_replace = True
elif arg in NO_ASK_CONFIRMATION_INDICATORS_STRINGS:
ask_replace = False
elif arg in CASE_SENSITIVE_INDICATORS_STRINGS:
case_sensitive = True
elif arg in CASE_INSENSITIVE_INDICATORS_STRINGS:
case_sensitive = False
elif arg in NO_BLACK_LIST_EXTENSIONS_INDICATORS_STRINGS:
black_list_extensions = False
elif arg in BLACK_LIST_EXTENSIONS_INDICATORS_STRINGS:
black_list_extensions = True
elif arg in BINARY_INDICATORS_STRINGS:
binary_accepted = True
elif arg in NO_BINARY_INDICATORS_STRINGS:
binary_accepted = False
elif arg in SYMLINK_INDICATORS_STRINGS:
symlink_accepted = True
elif arg in NO_SYMLINK_INDICATORS_STRINGS:
symlink_accepted = False
elif arg in END_INDICATORS_STRINGS:
pass
elif arg_index < nb_args - 1:
if arg in FILENAME_MUST_END_BY_INDICATORS_STRINGS:
for potential_end_filter_index, potential_end_filter in enumerate(input_args[arg_index + 1:]):
if not potential_end_filter.startswith("-"):
file_name_must_end_by.append(potential_end_filter)
args_not_used_indexes.remove(arg_index + 1 + potential_end_filter_index)
else:
break
elif arg in EXCLUDED_PATHS_INDICATORS_STRINGS:
for potential_file_path_to_exclude_index, potential_file_path_to_exclude in enumerate(
input_args[arg_index + 1:]):
if not potential_file_path_to_exclude.startswith("-"):
potential_file_path_to_exclude = get_full_path_joined(potential_file_path_to_exclude)
args_not_used_indexes.remove(
arg_index + 1 + potential_file_path_to_exclude_index)
if _check_path_exists(potential_file_path_to_exclude):
excluded_paths.append(potential_file_path_to_exclude)
else:
break
elif arg in ADD_EXCLUDED_EXTENSIONS_INDICATORS_STRINGS:
for potential_new_excluded_extension_index, potential_new_excluded_extension in enumerate(
input_args[arg_index + 1:]):
if not potential_new_excluded_extension.startswith("-"):
excluded_extensions.append(potential_new_excluded_extension)
args_not_used_indexes.remove(
arg_index + 1 + potential_new_excluded_extension_index)
else:
break
elif arg in ADD_EXCLUDED_STRINGS_INDICATORS_STRINGS:
for potential_new_excluded_string_index, potential_new_excluded_string in enumerate(
input_args[arg_index + 1:]):
if not potential_new_excluded_string.startswith("-"):
excluded_strings.append(potential_new_excluded_string)
args_not_used_indexes.remove(
arg_index + 1 + potential_new_excluded_string_index)
else:
break
elif arg in INITIAL_STRING_INDICATORS_STRINGS:
init_str = input_args[arg_index + 1]
args_not_used_indexes.remove(arg_index + 1)
elif arg in DESTINATION_STRING_INDICATORS_STRINGS:
dest_str = input_args[arg_index + 1]
args_not_used_indexes.remove(arg_index + 1)
elif arg in DIRECTORY_PATH_TO_APPLY_INDICATORS_STRINGS:
dir_path_to_apply = input_args[arg_index + 1]
args_not_used_indexes.remove(arg_index + 1)
elif arg in LIST_FILES_PATHS_TO_APPLY_INDICATORS_STRINGS:
for potential_file_path_to_replace_index, potential_file_path_to_replace in enumerate(
input_args[arg_index + 1:]):
if not potential_file_path_to_replace.startswith("-"):
file_paths_to_apply.append(potential_file_path_to_replace)
args_not_used_indexes.remove(
arg_index + 1 + potential_file_path_to_replace_index)
else:
break
else:
logger.error("no parameter after %s indicator" % arg +
"\n\tneeds a parameter after the %s indicator" % arg)
exit(1)
args_not_used_indexes.remove(arg_index)
else:
logger.error("the indicator %s is not supported" % arg +
"\n\tplease remove the %s parameter from the command" % arg)
exit(1)
elif arg.startswith("-"):
for short_indicator in arg[1:]:
if short_indicator not in SUPPORTED_SHORT_INDICATORS:
logger.error("the short indicator -%s is not supported" % short_indicator +
"\n\tplease remove the -%s short indicator from the command" % short_indicator)
exit(1)
elif short_indicator in not_compatible_shorts_indicators:
logger.error("the short indicator -%s is not compatible with these short "
"indicators %s" % (short_indicator, not_compatible_shorts_indicators) +
"\n\tplease remove one of the incompatibles shorts indicators from the command")
exit(1)
elif short_indicator == 'l':
local = True
not_compatible_shorts_indicators += ['r', 's']
elif short_indicator == 'r':
recursive = True
local = False
not_compatible_shorts_indicators += ['l', 's']
elif short_indicator == 's':
specific = True
local = False
not_compatible_shorts_indicators += ['l', 'r']
elif short_indicator == 'a':
ask_replace = True
elif short_indicator == 'c':
case_sensitive = True
args_not_used_indexes.remove(arg_index)
return file_name_must_end_by, init_str, dest_str, dir_path_to_apply, file_paths_to_apply, local, recursive, \
specific, ask_replace, case_sensitive, black_list_extensions, binary_accepted, symlink_accepted, \
excluded_strings, excluded_extensions, excluded_paths, args_not_used_indexes
def _check_only_one_replace_mode_picked(local, specific, recursive):
nb_of_true = 0
for replace_mode in [local, specific, recursive]:
if replace_mode:
nb_of_true += 1
if nb_of_true != 1:
logger.error(
"the replace mode can only be \"local\", \"recursive\" or \"specific\"\n\tplease pick only one mode " +
"with the -l, -r or -s short options")
exit(1)
def _get_final_args_local_recursive(dir_path_to_apply, dest_str, init_str, local, recursive, input_args,
args_not_used_indexes):
if dir_path_to_apply is None:
if not args_not_used_indexes:
logger.error("arguments are missing ... please review the command syntax.")
_args_local_recursive_error(local, recursive)
dir_path_to_apply = input_args[args_not_used_indexes[-1]]
args_not_used_indexes.pop()
if dest_str is None:
if not args_not_used_indexes:
logger.error("arguments are missing ... please review the command syntax.")
_args_local_recursive_error(local, recursive)
dest_str = input_args[args_not_used_indexes[-1]]
args_not_used_indexes.pop()
if init_str is None:
if not args_not_used_indexes:
logger.error("arguments are missing ... please review the command syntax.")
_args_local_recursive_error(local, recursive)
init_str = input_args[args_not_used_indexes[-1]]
args_not_used_indexes.pop()
if dir_path_to_apply is None or dest_str is None or init_str is None:
logger.error("arguments are missing ... please review the command syntax.")
_args_local_recursive_error(local, recursive)
if args_not_used_indexes:
logger.error("too much args entered ... please review the command syntax.")
_args_local_recursive_error(local, recursive)
return dir_path_to_apply, dest_str, init_str
def _args_local_recursive_error(local, recursive):
if local:
logger.error("for a \"local replace\" please precise the \"initial string\", the \"destination string\" and "
"the \"directory path\" to apply the local replacement\na correct command would be:\n\t"
+ WHITE + "replace -l titi toto /home/toto/documents/titi_folder" + BASE_C)
exit(1)
if recursive:
logger.error(
"for a \"recursive replace\" please precise the \"initial string\", the \"destination string\" and "
"the \"directory path\" to apply the recursive replacement from\na correct command would be:\n\t"
+ WHITE + "replace -r titi toto /home/toto/documents/titi_folder" + BASE_C)
exit(1)
def _get_final_args_specific(file_paths_to_apply, dest_str, init_str, specific, input_args, args_not_used_indexes):
if init_str is None:
if not args_not_used_indexes:
logger.error("arguments are missing ... please review the command syntax.")
_args_specific_error(specific)
init_str = input_args[args_not_used_indexes[0]]
args_not_used_indexes.pop(0)
if dest_str is None:
if not args_not_used_indexes:
logger.error("arguments are missing ... please review the command syntax.")
_args_specific_error(specific)
dest_str = input_args[args_not_used_indexes[0]]
args_not_used_indexes.pop(0)
if not file_paths_to_apply:
if not args_not_used_indexes:
logger.error("arguments are missing ... please review the command syntax.")
_args_specific_error(specific)
for parameter_not_already_used_index in args_not_used_indexes:
file_paths_to_apply.append(input_args[parameter_not_already_used_index])
args_not_used_indexes = []
if args_not_used_indexes:
logger.error("too much args entered ... please review the command syntax.")
_args_specific_error(specific)
return file_paths_to_apply, dest_str, init_str
def _args_specific_error(specific):
if specific:
logger.error("for a \"specific replace\" please precise the \"initial string\", "
"the \"destination string\" and the \"list files paths\" to apply the specific replacement\n\t" +
"a correct command would be: " + WHITE + "replace -s titi toto /home/toto/documents/test00 "
"documents/titi_folder/test01 documents/titi_"
"folder/secret_titi_folder/test02" + BASE_C)
exit(1)
def _check_init_str_in_file(file_path, init_str, case_sensitive):
try:
if case_sensitive:
for line in open(file_path, 'r', encoding=ENCODING_INPUT_FILES):
if init_str in line:
return True
return False
elif not case_sensitive:
for line in open(file_path, 'r', encoding=ENCODING_INPUT_FILES):
if _case_less_str1_in_str2(init_str, line):
return True
return False
except PermissionError:
logger.warning("\tyou don't have the permission to access the file " + CFILE_PATHS + "%s" % file_path + BASE_C)
_skipped()
return False
except UnicodeDecodeError:
logger.warning("\tthe file " + CFILE_PATHS + "%s" % file_path + BASE_C + " owns non unicode characters")
_skipped()
return False
except:
logger.error("the file %s seems to cause problem to be opened" % file_path)
exit(1)
def _get_str_positions_in_lines(string, line, case_sensitive):
start_string_indexes = []
if case_sensitive:
for index in range(len(line)):
if line[index:].startswith(string):
start_string_indexes.append(index)
if not case_sensitive:
for index in range(len(line)):
if _normalize_case_less(line)[index:].startswith(_normalize_case_less(string)):
start_string_indexes.append(index)
return start_string_indexes
# def ok(msg=""):
# print(GREEN + "\n\t[OK] " + BASE_C + msg)
# def _info(msg=""):
# print(WHITE + "\n\t[INFO] " + BASE_C + msg)
#
#
# def _warning(msg=""):
# print(ORANGE + "\n\t[WARNING] " + BASE_C + msg)
#
#
# def _error(msg=""):
# print(RED + "\n\t[ERROR] " + BASE_C + msg)
def _skipped():
print(BLUE + "\n\t\t\tskipped\n\n" + BASE_C)
def _check_user_rights(file_path):
current_user = getpass.getuser()
owner_file = getpwuid(stat(file_path).st_uid).pw_name
if owner_file != current_user:
logger.warning("the file " + CFILE_PATHS + "%s" % file_path + BASE_C + " is owned by " + CFILE_PATHS +
"%s" % owner_file + BASE_C + ", might be necessary to manage its permissions")
def _print_prev_lines(previous_lines, line_nb):
if len(previous_lines) == 2:
print(YELLOW + "\t %s: " % (line_nb - 2) + CTEXT_FILES + "%s" % previous_lines[1], end='')
print(YELLOW + "\t %s: " % (line_nb - 1) + CTEXT_FILES + "%s" % previous_lines[0] + BASE_C, end='')
elif len(previous_lines) == 1:
print(YELLOW + "\t %s: " % (line_nb - 1) + CTEXT_FILES + "%s" % previous_lines[0] + BASE_C, end='')
def _display_line_highlighting_init_strs(line, line_nb, start_string_positions, init_str, previous_lines):
if len(start_string_positions) > 1:
logger.info(
"\n\tthere are several occurrences of " + COCCURRENCES + "\"%s\"" % init_str + BASE_C + " in this line:\n")
_print_prev_lines(previous_lines, line_nb)
print(COCCURRENCES + "\t %s: " % line_nb + CTEXT_FILES + "%s" % line[0:start_string_positions[0]], end='')
for string_list_index, string_index in enumerate(start_string_positions):
print(COCCURRENCES + "%s" % init_str + BASE_C, end='')
if string_list_index == len(start_string_positions) - 1:
print(CTEXT_FILES + "%s" % line[string_index + len(init_str):] + BASE_C, end='')
else:
print(CTEXT_FILES + "%s" % line[string_index + len(init_str):start_string_positions[
string_list_index + 1]] + BASE_C, end='')
return start_string_positions
def _display_line_highlighting_defined_init_str(new_line, line, init_str,
defined_init_str_start_position):
print(CTEXT_FILES + "\t\t%s" % new_line, end='')
print(COCCURRENCES + "%s" % init_str + BASE_C, end='')
print(CTEXT_FILES + "%s" %
line[defined_init_str_start_position + len(init_str):] + BASE_C, end='')
def _normalize_case_less(string):
return unicodedata.normalize("NFKD", string.casefold())
def _case_less_equal(str1, str2):
return _normalize_case_less(str1) == _normalize_case_less(str2)
def _case_less_str1_in_str2(str1, str2):
if _normalize_case_less(str1) in _normalize_case_less(str2):
return True
else:
return False
class Abort(Exception):
pass
def _abort_process(temporary_file, temporary_file_path):
temporary_file.close()
os.remove(temporary_file_path)
logger.info(YELLOW + "\n\t\t\taborted ...\n\t\t\t\tSee you later" + BASE_C)
raise Abort
def _complete_new_line(new_line, line, start_string_index, nb_occurrences_in_line, start_string_position,
start_string_positions, len_init_str):
if start_string_index == nb_occurrences_in_line - 1:
new_line += line[start_string_position + len_init_str:]
else:
new_line += line[start_string_position + len_init_str:start_string_positions[start_string_index + 1]]
return new_line
def _build_new_line(new_line, line, start_string_index, nb_occurrences_in_line, start_string_position,
start_string_positions, init_str):
new_line += init_str
new_line = _complete_new_line(new_line, line, start_string_index, nb_occurrences_in_line,
start_string_position,
start_string_positions, len(init_str))
return new_line
def _multi_replacement_on_same_line(line, start_string_positions, init_str, dest_str, temporary_file,
temporary_file_path, skip_file):
global replaced_nb
new_line = line[:start_string_positions[0]]
nb_occurrences_in_line = len(start_string_positions)
for start_string_index, start_string_position in enumerate(start_string_positions):
if skip_file:
new_line = _build_new_line(new_line, line, start_string_index, nb_occurrences_in_line,
start_string_position,
start_string_positions, init_str)
else:
print(YELLOW + "\n\toccurrence %s:" % (start_string_index + 1) + BASE_C)
_display_line_highlighting_defined_init_str(new_line, line, init_str, start_string_position)
replace_conf = input("\n\tperform replacement for this occurrence?\n\t\t[Enter] to proceed\t\t[fF] to "
"skip the rest of the file\n\t\t[oO] to skip this occurrence\t[aA] to abort "
"the replace process\n\t")
if replace_conf == "":
print(CFILE_PATHS + "\t\t\tdone\n\n" + BASE_C)
new_line += dest_str
new_line = _complete_new_line(new_line, line, start_string_index, nb_occurrences_in_line,
start_string_position,
start_string_positions, len(init_str))
replaced_nb += 1
elif replace_conf in ["a", "A"]:
raise Abort
# _abort_process(temporary_file, temporary_file_path)
else:
new_line = _build_new_line(new_line, line, start_string_index, nb_occurrences_in_line,
start_string_position, start_string_positions, init_str)
_skipped()
if replace_conf in ["f", "F"]:
skip_file = True
temporary_file.write(new_line)
return skip_file
def _replace_one_occurrence_asked(replace_conf, temporary_file, temporary_file_path, init_str,
dest_str, line, case_sensitive, skip_file):
global replaced_nb
if replace_conf == "":
if case_sensitive:
temporary_file.write(re.sub(init_str, dest_str, line))
else:
temporary_file.write(re.sub(init_str, dest_str, line, flags=re.I))
replaced_nb += 1
print(CFILE_PATHS + "\t\t\tdone\n\n" + BASE_C)
elif replace_conf in ["a", "A"]:
raise Abort
# _abort_process(temporary_file, temporary_file_path)
else:
temporary_file.write(line)
_skipped()
if replace_conf in ["f", "F"]:
skip_file = True
return skip_file
def _update_previous_lines(previous_lines, line):
if len(previous_lines) == 0:
previous_lines.append(copy.deepcopy(line))
elif len(previous_lines) == 1:
previous_lines.append(copy.deepcopy(previous_lines[0]))
elif len(previous_lines) == 2:
previous_lines[1] = copy.deepcopy(previous_lines[0])
previous_lines[0] = copy.deepcopy(line)
else:
logger.error("previous lines can not have more than 2 lines, check %s" % previous_lines)
exit(1)
def _find_tmp_file_not_existing(file_path):
for i in range(9):
temporary_file_path = file_path + ".rp0" + str(i + 1) + ".tmp"
if not os.path.exists(temporary_file_path):
return temporary_file_path
logger.error("all the available tmp extensions are used for the %s path" % file_path)
exit(1)
def _file_replace(file_path, temporary_file_path, init_str, dest_str, ask_replace, case_sensitive, file_mask):
global found_nb
global replaced_nb
if not os.path.isfile(file_path):
logger.error("The file %s doesn't exist" % file_path)
return
temporary_file = open(temporary_file_path, "w")
logger.info("There is " + COCCURRENCES + "\"%s\"" % init_str + BASE_C + " in the file "
+ CFILE_PATHS + "%s" % file_path + BASE_C)
logger.info("Replacing " + COCCURRENCES + "\"%s\"" % init_str + BASE_C + " by " + COCCURRENCES + "\"%s\""
% dest_str + BASE_C + " in " + CFILE_PATHS + "%s" % file_path + BASE_C + " for the following lines:")
previous_lines = []
skip_file = False
try:
for line_index, line in enumerate(open(file_path, encoding=ENCODING_INPUT_FILES)):
line_nb = line_index + 1
if case_sensitive:
if init_str in line and not skip_file:
start_string_positions = _get_str_positions_in_lines(init_str, line, case_sensitive)
_display_line_highlighting_init_strs(line, line_nb, start_string_positions, init_str,
previous_lines)
found_nb += len(start_string_positions)
if not ask_replace:
temporary_file.write(re.sub(init_str, dest_str, line))
replaced_nb += len(start_string_positions)
elif ask_replace:
if len(start_string_positions) == 1:
replace_conf = input("\n\tperform replacement?\n\t\t[Enter] to proceed\t\t[fF] to skip "
"the rest of the file\n\t\t[oO] to skip this occurrence\t"
"[aA] to abort the replace process\n\t")
skip_file = _replace_one_occurrence_asked(replace_conf,
temporary_file,
temporary_file_path,
init_str,
dest_str, line,
case_sensitive, skip_file)
else:
skip_file = _multi_replacement_on_same_line(line,
start_string_positions,
init_str,
dest_str,
temporary_file,
temporary_file_path,
skip_file)
else:
logger.error("the ask_replace parameter can only be \"True\" or \"False\"" +
"the ask_replace parameter can not be %s" % ask_replace)
exit(1)
else:
temporary_file.write(line)
elif not case_sensitive:
if _case_less_str1_in_str2(init_str, line) and not skip_file:
start_string_positions = _get_str_positions_in_lines(init_str, line, case_sensitive)
_display_line_highlighting_init_strs(line, line_nb, start_string_positions,
init_str, previous_lines)
found_nb += len(start_string_positions)
if not ask_replace:
replaced_nb += len(start_string_positions)
temporary_file.write(re.sub(init_str, dest_str, line, flags=re.I))
elif ask_replace:
if len(start_string_positions) == 1:
replace_conf = input("\n\tPerform replacement?\n\t\t[Enter] to proceed\t\t[fF] to skip "
"The rest of the file\n\t\t[oO] to skip this occurrence\t[aA] to abort "
"The replace process\n\t")
skip_file = _replace_one_occurrence_asked(replace_conf,
temporary_file,
temporary_file_path,
init_str,
dest_str, line,
case_sensitive, skip_file)
else:
skip_file = _multi_replacement_on_same_line(line,
start_string_positions,
init_str,
dest_str,
temporary_file,
temporary_file_path,
skip_file)
else:
logger.error("The ask_replace parameter can only be \"True\" or \"False\"" +
"The ask_replace parameter can not be %s" % ask_replace)
exit(1)
else:
temporary_file.write(line)
else:
logger.error("The case_sensitive parameter can only be \"True\" or \"False\"" +
"The case_sensitive parameter can not be %s" % case_sensitive)
exit(1)
_update_previous_lines(previous_lines, line)
except UnicodeDecodeError as e:
logger.error("File with utf-8 encoding issue\n\t%s" % e)
temporary_file.close()
os.remove(temporary_file_path)
return
except Abort:
temporary_file.close()
os.remove(temporary_file_path)
logger.info(YELLOW + "\n\t\t\taborted ...\n\t\t\t\tSee you later" + BASE_C)
exit(0)
except:
logger.error("Issue while parsing file\n\t%s" % sys.exc_info()[0])
temporary_file.close()
os.remove(file_path)
os.rename(temporary_file_path, file_path)
os.chmod(file_path, int(file_mask, 8))
def _check_file_extension_in_blacklist(file_path):
for black_list_extension in BLACK_LIST_EXTENSIONS_LIST:
if file_path.endswith(black_list_extension):
logger.warning("the file %s owns the extension %s that is not accepted by default\n\t"
"use one of these parameters %s if you want to perform replacement in this kind of file "
"anyway" % (file_path, black_list_extension, NO_BLACK_LIST_EXTENSIONS_INDICATORS_STRINGS))
_skipped()
return True
return False
def _check_file_name_must_end_by(file_name_must_end_by, file_path):
for acceptable_file_name_end in file_name_must_end_by:
if file_path.endswith(acceptable_file_name_end):
return True
logger.warning(
"the file %s doesn't end by the acceptable end extensions you entered: %s" % (file_path, file_name_must_end_by))
_skipped()
return False
def _check_file_owns_excluded_path(excluded_paths, file_path):
for excluded_path in excluded_paths:
if file_path.startswith(excluded_path):
logger.warning(
"the file %s is excluded regarding the excluded path %s you entered" % (file_path, excluded_path))
_skipped()
return True
return False
def _check_file_owns_excluded_extension(excluded_extensions, file_path):
for excluded_extension in excluded_extensions:
if file_path.endswith(excluded_extension):
logger.warning("the file %s is excluded regarding the "
"excluded extension %s you entered" % (file_path, excluded_extension))
_skipped()
return True
return False
def _check_file_owns_excluded_str(excluded_strings, file_path):
for excluded_string in excluded_strings:
if excluded_string in file_path:
logger.warning(
"the file %s is excluded regarding the excluded string %s you entered" % (file_path, excluded_string))
_skipped()
return True
return False
def _check_able_open_file(file_path):
try:
open_test = open(file_path, 'r', encoding=ENCODING_INPUT_FILES)
open_test.close()
return True
except FileNotFoundError:
logger.error("The file " + CFILE_PATHS + "%s" % file_path + BASE_C + " doesn't exist")
_skipped()
return False
except PermissionError:
logger.error("You don't have the permission to access the file " + CFILE_PATHS + "%s" % file_path + BASE_C)
_skipped()
return False
except IsADirectoryError:
logger.error("The path " + CFILE_PATHS + "%s" % file_path + BASE_C + "is a directory, not a file")
_skipped()
return False
except OSError:
logger.error("No such device or address " + CFILE_PATHS + "%s" % file_path + BASE_C)
_skipped()
return False
except UnicodeDecodeError as e:
print("Decode byte error for file %s\n%s" % (file_path, e))
return False
def _check_able_create_temporary(temporary_file_path, file_mask):
try:
open_test = open(temporary_file_path, 'w+')
open_test.close()
os.chmod(temporary_file_path, int(file_mask, 8))
os.remove(temporary_file_path)
return True
except FileNotFoundError:
logger.error("the file " + CFILE_PATHS + "%s" % temporary_file_path + BASE_C + " doesn't exist")
_skipped()
return False
except PermissionError:
logger.error(
"you don't have the permission to create the file " + CFILE_PATHS + "%s" % temporary_file_path + BASE_C)
_skipped()
return False
except IsADirectoryError:
logger.error(" the path " + CFILE_PATHS + "%s" % temporary_file_path + BASE_C + "is a directory, not a file")
_skipped()
return False
def _check_binary_file(file_path):
textchars = bytearray({7, 8, 9, 10, 12, 13, 27} | set(range(0x20, 0x100)) - {0x7f})
is_binary_string = lambda bytes: bool(bytes.translate(None, textchars))
try:
if is_binary_string(open(file_path, 'rb').read(1024)):
logger.warning("the file %s is a binary file" % file_path)
_skipped()
return True
return False
except PermissionError:
logger.error("you don't have the permission to access the file " + CFILE_PATHS + "%s" % file_path + BASE_C)
_skipped()
return True
def _check_symlink_path(file_path):
if os.path.islink(file_path):
logger.warning("the file %s is a symlink file" % file_path)
_skipped()
return True
return False
def _get_temporary_file_path(file_path, excluded_paths):
temporary_file_path = file_path + ".tmp"
if os.path.exists(temporary_file_path):
temporary_file_path = _find_tmp_file_not_existing(file_path)
excluded_paths.append(temporary_file_path)
return temporary_file_path
def _get_file_permission_mask(file_path):
return oct(os.stat(file_path).st_mode & 0o777)
def _replace_local_recursive(directory_path, init_str, dest_str, black_list_extensions, file_name_must_end_by,
excluded_paths, excluded_strings, excluded_extensions, local, ask_replace, case_sensitive,
binary_accepted, symlink_accepted):
for directory_path, directory_names, file_names in os.walk(directory_path):
for file_name in file_names:
file_path = os.path.join(directory_path, file_name)
if _check_file_owns_excluded_path(excluded_paths, file_path):
continue
if _check_file_owns_excluded_extension(excluded_extensions, file_path):
continue
if _check_file_owns_excluded_str(excluded_strings, file_path):
continue
if not _check_path_exists(file_path):
logger.warning("the file path " + CFILE_PATHS + "%s" % file_path + BASE_C +
" seems to cause problem, might be a broken symlink")
continue
_check_user_rights(file_path)
if not _check_able_open_file(file_path):
continue
if black_list_extensions:
if _check_file_extension_in_blacklist(file_path):
continue
if not symlink_accepted:
if _check_symlink_path(file_path):
continue
if not binary_accepted:
if _check_binary_file(file_path):
continue
if file_name_must_end_by:
if not _check_file_name_must_end_by(file_name_must_end_by, file_path):
continue
file_mask = _get_file_permission_mask(file_path)
temporary_file_path = _get_temporary_file_path(file_path, excluded_paths)
if not _check_able_create_temporary(temporary_file_path, file_mask):
continue
if _check_init_str_in_file(file_path, init_str, case_sensitive):
_file_replace(file_path, temporary_file_path, init_str, dest_str,
ask_replace, case_sensitive, file_mask)
if local:
break
def _check_folder_path_exists(folder_path):
if not os.path.isdir(folder_path):
logger.warning(CFILE_PATHS + " %s " % folder_path + BASE_C + "folder doesn't exist" +
"\n\tthe directory path to apply %s doesn't exist, you may review it" % folder_path)
exit(1)
def _check_path_exists(path):
if not os.path.exists(path):
logger.warning(CFILE_PATHS + " %s " % path + BASE_C + "path doesn't exist")
return False
return True
def _replace_specific(file_paths_to_apply, init_str, dest_str, black_list_extensions, ask_replace, case_sensitive,
binary_accepted, symlink_accepted):
for file_path in file_paths_to_apply:
if not _check_path_exists(file_path):
logger.warning("the file path %s seems to cause problem, might be a broken symlink" % file_path)
_skipped()
continue
_check_user_rights(file_path)
if not _check_able_open_file(file_path):
continue
if black_list_extensions:
if _check_file_extension_in_blacklist(file_path):
continue
if not symlink_accepted:
if _check_symlink_path(file_path):
continue
if not binary_accepted:
if _check_binary_file(file_path):
continue
#
# except UnicodeDecodeError as e:
# print("Decode byte error for file %s\n%s" % (file_path, e))
file_mask = _get_file_permission_mask(file_path)
temporary_file_path = _get_temporary_file_path(file_path, [])
if not _check_able_create_temporary(temporary_file_path, file_mask):
continue
if _check_init_str_in_file(file_path, init_str, case_sensitive):
_file_replace(file_path, temporary_file_path, init_str, dest_str, ask_replace, case_sensitive, file_mask)
def _occs_summary(init_str):
global found_nb
global replaced_nb
if found_nb == 0:
logger.info(
CFILE_PATHS + "\n\t0" + BASE_C + " occurrence of " + COCCURRENCES + "%s" % init_str + BASE_C + " found")
elif found_nb == 1:
logger.info(CFILE_PATHS + "\n\t1" + BASE_C + " occurrence of " + COCCURRENCES + "%s" % init_str +
BASE_C + " found and " + CFILE_PATHS + "%s" % replaced_nb + BASE_C + " replaced")
else:
logger.info(CFILE_PATHS + "\n\t%s" % found_nb + BASE_C + " occurrences of " + COCCURRENCES +
"%s" % init_str + BASE_C + " found and " + CFILE_PATHS + "%s" % replaced_nb +
BASE_C + " replaced")
def _get_full_paths(dir_path_to_apply, file_paths_to_apply):
if file_paths_to_apply:
file_paths_list = []
for file_name in file_paths_to_apply:
file_paths_list.append(get_full_path_joined(file_name))
file_paths_to_apply = file_paths_list
if dir_path_to_apply is not None:
dir_path_to_apply = get_full_path_joined(dir_path_to_apply)
return dir_path_to_apply, file_paths_to_apply
def get_full_path_joined(given_path):
return os.path.normpath((os.path.join(os.getcwd(), os.path.expanduser(given_path))))
def _check_integrity_of_mode_request(local, recursive, specific, dir_path_to_apply, file_paths_to_apply):
if local or recursive:
if file_paths_to_apply:
logger.warning("The replace mode is not specific, file_paths_to_apply should be empty and "
"is: %s" % file_paths_to_apply + "\n\tfile_paths_to_apply should be empty")
exit(1)
if dir_path_to_apply is None:
logger.warning("The replace mode is not specific, dir_path_to_apply should not be None" +
"\n\tdir_path_to_apply should be defined")
exit(1)
if not _check_path_exists(dir_path_to_apply):
logger.error("The directory path %s doesn't exist" % dir_path_to_apply)
exit(1)
if not os.path.isdir(dir_path_to_apply):
logger.info(
CFILE_PATHS + "%s" % dir_path_to_apply + BASE_C + " is a file, proceeding specific replace mode")
local = False
recursive = False
specific = True
file_paths_to_apply.append(dir_path_to_apply)
elif specific:
if not file_paths_to_apply:
logger.error(
"The replace mode is specific, file_paths_to_apply should not be empty and is: %s"
% file_paths_to_apply + "\n\tfile_paths_to_apply should not be empty in specific mode replacement")
exit(1)
if dir_path_to_apply is not None:
logger.error("The replace mode is specific, dir_path_to_apply should be None" +
"\n\tdir_path_to_apply should not be defined")
exit(1)
file_paths = []
for file_path in file_paths_to_apply:
if not _check_path_exists(file_path):
logger.warning(
CFILE_PATHS + "%s" % file_path + BASE_C + " doesn't exist, removing it from the file list")
elif os.path.isdir(file_path):
logger.warning(CFILE_PATHS + "%s" % file_path + BASE_C + " is a folder, removing it from the file list")
else:
file_paths.append(file_path)
file_paths_to_apply = file_paths
return local, recursive, specific, dir_path_to_apply, file_paths_to_apply
def launch():
input_args = sys.argv[1:]
_help_requested(input_args)
_check_input_args(input_args)
# file_name_must_end_by, local, recursive, specific, ask_replace, case_sensitive, \
# black_list_extensions, binary_accepted, symlink_accepted, excluded_strings, \
# excluded_extensions, excluded_paths = _init_indicators()
# init_str, dest_str, dir_path_to_apply, file_paths_to_apply = _init_args()
file_name_must_end_by, init_str, dest_str, dir_path_to_apply, \
file_paths_to_apply, local, recursive, specific, ask_replace, \
case_sensitive, black_list_extensions, binary_accepted, \
symlink_accepted, excluded_strings, excluded_extensions, \
excluded_paths, args_not_used_indexes = _treat_input_args(input_args)
_check_only_one_replace_mode_picked(local, specific, recursive)
# finalize getting all the parameters
if local or recursive:
dir_path_to_apply, dest_str, init_str = \
_get_final_args_local_recursive(dir_path_to_apply, dest_str,
init_str, local, recursive, input_args,
args_not_used_indexes)
elif specific:
file_paths_to_apply, dest_str, init_str = \
_get_final_args_specific(file_paths_to_apply, dest_str,
init_str, specific, input_args,
args_not_used_indexes)
else:
logger.error("the replace mode can only be \"local\", \"recursive\" or \"specific\"" +
"\n\tplease pick only one mode with the -l, -r or -s short options")
exit(1)
dir_path_to_apply, file_paths_to_apply = _get_full_paths(dir_path_to_apply, file_paths_to_apply)
local, recursive, specific, dir_path_to_apply, file_paths_to_apply = \
_check_integrity_of_mode_request(local, recursive, specific, dir_path_to_apply, file_paths_to_apply)
# apply the replace
# if local:
# found_nb, replaced_nb = \
# _replace_local_recursive(dir_path_to_apply, init_str, dest_str, black_list_extensions,
# file_name_must_end_by, excluded_paths, excluded_strings, excluded_extensions,
# local,
# found_nb, replaced_nb, ask_replace, case_sensitive,
# binary_accepted, symlink_accepted)
#
# elif recursive:
# found_nb, replaced_nb = \
# _replace_local_recursive(dir_path_to_apply, init_str, dest_str, black_list_extensions,
# file_name_must_end_by, excluded_paths, excluded_strings, excluded_extensions,
# local,
# found_nb, replaced_nb, ask_replace, case_sensitive,
# binary_accepted, symlink_accepted)
if local or recursive:
_replace_local_recursive(dir_path_to_apply, init_str, dest_str, black_list_extensions,
file_name_must_end_by, excluded_paths, excluded_strings, excluded_extensions,
local, ask_replace, case_sensitive,
binary_accepted, symlink_accepted)
elif specific:
_replace_specific(file_paths_to_apply, init_str, dest_str, black_list_extensions, ask_replace,
case_sensitive, binary_accepted, symlink_accepted)
else:
logger.error("the replace mode can only be \"local\", \"recursive\" or \"specific\"\n\t"
"please pick only one mode with the -l, -r or -s short options")
exit(1)
_occs_summary(init_str)
if __name__ == "__main__":
launch()
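# Illustrative invocations (a sketch assembled from the example commands that the
# error messages above already print; the installed entry point is assumed to be
# called "replace"):
#
#   replace -l titi toto /home/toto/documents/titi_folder    # local: only the top level of the folder
#   replace -r titi toto /home/toto/documents/titi_folder    # recursive: the folder and every sub-folder
#   replace -s titi toto test00 titi_folder/test01           # specific: an explicit list of files
#
# The -a (ask before each replacement) and -c (case sensitive) short indicators can
# be combined with any of the three modes, as parsed in _treat_input_args above.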
| [
"os.path.expanduser",
"os.path.exists",
"os.rename",
"os.path.join",
"os.getcwd",
"os.path.isfile",
"sys.exc_info",
"re.sub",
"os.path.isdir",
"copy.deepcopy",
"getpass.getuser",
"os.stat",
"os.path.islink",
"os.walk",
"os.remove"
] | [((20248, 20265), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (20263, 20265), False, 'import getpass\n'), ((22837, 22867), 'os.remove', 'os.remove', (['temporary_file_path'], {}), '(temporary_file_path)\n', (22846, 22867), False, 'import os\n'), ((34246, 34266), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (34255, 34266), False, 'import os\n'), ((34271, 34312), 'os.rename', 'os.rename', (['temporary_file_path', 'file_path'], {}), '(temporary_file_path, file_path)\n', (34280, 34312), False, 'import os\n'), ((38887, 38912), 'os.path.islink', 'os.path.islink', (['file_path'], {}), '(file_path)\n', (38901, 38912), False, 'import os\n'), ((39149, 39184), 'os.path.exists', 'os.path.exists', (['temporary_file_path'], {}), '(temporary_file_path)\n', (39163, 39184), False, 'import os\n'), ((39781, 39804), 'os.walk', 'os.walk', (['directory_path'], {}), '(directory_path)\n', (39788, 39804), False, 'import os\n'), ((27677, 27702), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (27691, 27702), False, 'import os\n'), ((37626, 37656), 'os.remove', 'os.remove', (['temporary_file_path'], {}), '(temporary_file_path)\n', (37635, 37656), False, 'import os\n'), ((41694, 41720), 'os.path.isdir', 'os.path.isdir', (['folder_path'], {}), '(folder_path)\n', (41707, 41720), False, 'import os\n'), ((41983, 42003), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (41997, 42003), False, 'import os\n'), ((26788, 26807), 'copy.deepcopy', 'copy.deepcopy', (['line'], {}), '(line)\n', (26801, 26807), False, 'import copy\n'), ((27331, 27366), 'os.path.exists', 'os.path.exists', (['temporary_file_path'], {}), '(temporary_file_path)\n', (27345, 27366), False, 'import os\n'), ((33893, 33923), 'os.remove', 'os.remove', (['temporary_file_path'], {}), '(temporary_file_path)\n', (33902, 33923), False, 'import os\n'), ((33996, 34026), 'os.remove', 'os.remove', (['temporary_file_path'], {}), '(temporary_file_path)\n', (34005, 34026), False, 'import os\n'), ((39868, 39907), 'os.path.join', 'os.path.join', (['directory_path', 'file_name'], {}), '(directory_path, file_name)\n', (39880, 39907), False, 'import os\n'), ((44763, 44774), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (44772, 44774), False, 'import os\n'), ((44776, 44806), 'os.path.expanduser', 'os.path.expanduser', (['given_path'], {}), '(given_path)\n', (44794, 44806), False, 'import os\n'), ((45607, 45639), 'os.path.isdir', 'os.path.isdir', (['dir_path_to_apply'], {}), '(dir_path_to_apply)\n', (45620, 45639), False, 'import os\n'), ((20292, 20307), 'os.stat', 'stat', (['file_path'], {}), '(file_path)\n', (20296, 20307), False, 'from os import stat\n'), ((26202, 26234), 're.sub', 're.sub', (['init_str', 'dest_str', 'line'], {}), '(init_str, dest_str, line)\n', (26208, 26234), False, 'import re\n'), ((26283, 26327), 're.sub', 're.sub', (['init_str', 'dest_str', 'line'], {'flags': 're.I'}), '(init_str, dest_str, line, flags=re.I)\n', (26289, 26327), False, 'import re\n'), ((26875, 26907), 'copy.deepcopy', 'copy.deepcopy', (['previous_lines[0]'], {}), '(previous_lines[0])\n', (26888, 26907), False, 'import copy\n'), ((26972, 27004), 'copy.deepcopy', 'copy.deepcopy', (['previous_lines[0]'], {}), '(previous_lines[0])\n', (26985, 27004), False, 'import copy\n'), ((27033, 27052), 'copy.deepcopy', 'copy.deepcopy', (['line'], {}), '(line)\n', (27046, 27052), False, 'import copy\n'), ((39392, 39410), 'os.stat', 'os.stat', (['file_path'], {}), '(file_path)\n', (39399, 39410), False, 'import os\n'), ((46747, 
46771), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), '(file_path)\n', (46760, 46771), False, 'import os\n'), ((34195, 34209), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (34207, 34209), False, 'import sys\n'), ((28915, 28947), 're.sub', 're.sub', (['init_str', 'dest_str', 'line'], {}), '(init_str, dest_str, line)\n', (28921, 28947), False, 'import re\n'), ((31522, 31566), 're.sub', 're.sub', (['init_str', 'dest_str', 'line'], {'flags': 're.I'}), '(init_str, dest_str, line, flags=re.I)\n', (31528, 31566), False, 'import re\n')] |
from inspect import getframeinfo, stack
from math import ceil, log
from . import back_end
def number_of_bits_needed(x):
x = int(x)
n = 1
while 1:
max_value = (2 ** n) - 1
min_value = 0
if min_value <= x <= max_value:
return n
n += 1
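# Examples following the loop above: number_of_bits_needed(1) == 1,
# number_of_bits_needed(3) == 2, number_of_bits_needed(4) == 3, i.e. the smallest
# unsigned width n whose maximum value (2**n - 1) can still represent x.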
def const(value):
if hasattr(value, "vector"):
return value
bits = number_of_bits_needed(value)
subtype = Unsigned(bits)
return Constant(subtype, value)
class Unsigned:
def __init__(self, bits):
self.bits = bits
def to_vector(self, value):
return int(value)
def from_vector(self, value):
return value
def constant(self, value):
return Constant(self, value)
def input(self, name):
return Input(self, name)
def output(self, name, expression):
return Output(self, name, expression)
def select(self, select, *args, **kwargs):
return Select(self, select, *args, **kwargs)
def register(self, clk, en=1, init=None, d=None):
return Register(self, clk, en, init, d)
def wire(self):
caller = getframeinfo(stack()[1][0])
return Wire(self, caller.filename, caller.lineno)
def rom(self, select, *args, **kwargs):
return ROM(self, select, *args, **kwargs)
def ram(self, *args, **kwargs):
return RAM(self, *args, **kwargs)
def dpr(self, *args, **kwargs):
return DPR(self, *args, **kwargs)
def Boolean():
return Unsigned(1)
def binary(a, b, operator):
b = const(b)
binary = back_end.Binary(a.vector, b.vector, operator)
subtype = Unsigned(binary.bits)
return Expression(subtype, binary, "(" + repr(a) + operator + repr(b) + ")")
def unary(a, operator):
unary = back_end.Unary(a.vector, operator)
subtype = Unsigned(unary.bits)
return Expression(subtype, unary, "(" + operator + repr(a) + ")")
class Expression:
def __init__(self, subtype, vector, string):
self.subtype = subtype
self.vector = vector
self.string = string
def label(self, label_string):
a = self
vector = back_end.Label(self.vector, label_string)
subtype = Unsigned(vector.bits)
return Expression(
subtype, vector, "%s.label(%s)" % (repr(a), str(label_string))
)
def cat(self, other):
a = self
b = const(other)
vector = back_end.Concatenate(a.vector, b.vector)
subtype = Unsigned(vector.bits)
return Expression(subtype, vector, "%s.cat(%s)" % (repr(a), repr(b)))
def resize(self, bits):
binary = back_end.Resize(self.vector, bits)
subtype = Unsigned(binary.bits)
return Expression(subtype, binary, "%s.resize(%s)" % (repr(self), str(bits)))
def __add__(self, other):
return binary(self, other, "+")
def __sub__(self, other):
return binary(self, other, "-")
def __mul__(self, other):
return binary(self, other, "*")
def __gt__(self, other):
return binary(self, other, ">")
def __ge__(self, other):
return binary(self, other, ">=")
def __lt__(self, other):
return binary(self, other, "<")
def __le__(self, other):
return binary(self, other, "<=")
def __eq__(self, other):
return binary(self, other, "==")
def __ne__(self, other):
return binary(self, other, "!=")
def __lshift__(self, other):
return binary(self, other, "<<")
def __rshift__(self, other):
return binary(self, other, ">>")
def __and__(self, other):
return binary(self, other, "&")
def __or__(self, other):
return binary(self, other, "|")
def __xor__(self, other):
return binary(self, other, "^")
def __neg__(self):
return unary(self, "-")
def __invert__(self):
return unary(self, "~")
def __abs__(self):
return self
def __getitem__(self, other):
try:
vector = back_end.Index(self.vector, int(other))
subtype = Unsigned(vector.bits)
return Expression(subtype, vector, "%s[%s]" % (self, other))
except TypeError:
vector = back_end.Slice(self.vector, other.start, other.stop)
subtype = Unsigned(vector.bits)
return Expression(
subtype, vector, "%s[%s:%s]" % (self, other.start, other.stop)
)
def get(self):
return self.subtype.from_vector(self.vector.get())
def __repr__(self):
return self.string
class Constant(Expression):
def __init__(self, subtype, value):
self.subtype = subtype
self.vector = back_end.Constant(subtype.to_vector(value), subtype.bits)
def __repr__(self):
return str(self.vector.value)
class Input(Expression):
def __init__(self, subtype, name):
self.subtype = subtype
self.vector = back_end.Input(name, subtype.bits)
self.string = "Input(%s)" % name
def set(self, value):
self.vector.set(self.subtype.to_vector(value))
class Output(Expression):
def __init__(self, subtype, name, expression):
self.subtype = subtype
self.vector = back_end.Output(name, expression.vector)
self.string = "Output(%s)" % name
def get(self):
return self.subtype.from_vector(self.vector.get())
class Select(Expression):
def __init__(self, subtype, select, *args, **kwargs):
select = const(select).vector
args = [const(i).vector for i in args]
default = const(kwargs.get("default", 0)).vector
self.vector = back_end.Select(select, *args, default=default)
self.subtype = Unsigned(self.vector.bits)
self.string = "select()"
class ROM(Expression):
def __init__(self, subtype, select, *args, **kwargs):
select = const(select).vector
args = [int(i) for i in args]
default = int(kwargs.get("default", 0))
self.vector = back_end.ROM(subtype.bits, select, *args, default=default)
self.subtype = subtype
self.string = "ROM()"
def DPRPort(ram, clk, addr, data, wen):
subtype = ram.subtype
ram = back_end.DPRPort(
ram.ram,
clk,
addr.vector,
data.vector,
wen.vector,
)
return Expression(subtype, ram, "DPR.portb()")
class DPR:
def __init__(self, subtype, depth, clk, initialise=None):
self.subtype = subtype
self.depth = depth
self.clk = clk
self.initialise = initialise
def porta(self, addr, data, wen):
self.ram = back_end.DPR(
self.subtype.bits,
self.depth,
self.clk,
addr.vector,
data.vector,
wen.vector,
self.initialise
)
return Expression(self.subtype, self.ram, "DPR.porta()")
def portb(self, addr, data, wen):
return DPRPort(self, self.clk, addr, data, wen)
class RAM:
def __init__(self, subtype, depth, clk, asynchronous=True, initialise=None):
self.subtype = subtype
self.write_address = Unsigned(int(ceil(log(depth, 2)))).wire()
self.read_address = Unsigned(int(ceil(log(depth, 2)))).wire()
self.write_data = subtype.wire()
self.read_data = subtype.wire()
self.write_enable = Boolean().wire()
self.read_enable = Boolean().wire()
self.ram = back_end.RAM(
subtype.bits,
depth,
clk,
self.write_address.vector,
self.write_data.vector,
self.write_enable.vector,
self.read_address.vector,
self.read_enable.vector,
asynchronous=asynchronous,
initialise=initialise,
)
def add_port(self, clk):
return RAMPort(self, clk)
def write(self, wraddr, wrdata, wren):
self.write_address.drive(wraddr)
self.write_data.drive(wrdata)
self.write_enable.drive(wren)
def read(self, rdaddr, rden=1):
self.read_address.drive(rdaddr)
self.read_enable.drive(const(rden))
return Expression(self.subtype, self.ram, "ram()")
class Register(Expression):
def __init__(self, subtype, clk, en, init, d):
self.subtype = subtype
d = d if d is None else const(d).vector
init = init if init is None else int(init)
en = const(en).vector
self.vector = back_end.Register(
clock=clk, bits=subtype.bits, en=en, d=d, init=init
)
def d(self, expression):
self.vector.d = expression.vector
def __repr__(self):
return "register"
class Wire(Expression):
def __init__(self, subtype, filename, lineno):
self.subtype = subtype
self.vector = back_end.Wire(filename, lineno, bits=subtype.bits)
def drive(self, expression):
self.vector.drive(expression.vector)
def __repr__(self):
return "wire"
| [
"inspect.stack",
"math.log"
] | [((1131, 1138), 'inspect.stack', 'stack', ([], {}), '()\n', (1136, 1138), False, 'from inspect import getframeinfo, stack\n'), ((7133, 7146), 'math.log', 'log', (['depth', '(2)'], {}), '(depth, 2)\n', (7136, 7146), False, 'from math import ceil, log\n'), ((7203, 7216), 'math.log', 'log', (['depth', '(2)'], {}), '(depth, 2)\n', (7206, 7216), False, 'from math import ceil, log\n')] |
""""sitk_align - Align using simple ITK and Elastix
Partially derived from elastix_auto.py by <NAME>
"""
import argparse
import json
import SimpleITK as sitk
import numpy as np
import os
import re
import shutil
import tempfile
from ._sitk_align import parse as parse_pts_file
FINAL_GRID_SPACING_IN_VOXELS = ['32', '32', '32']
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--moving-file",
help="Path to file to be aligned",
required=True)
parser.add_argument("--fixed-file",
help="Path to the reference file",
required=True)
parser.add_argument("--fixed-point-file",
help="Path to the marked points on the reference image")
parser.add_argument("--alignment-point-file",
help="Path to the file for nuggt-align's initial pts",
default=None)
parser.add_argument("--xyz",
help="Coordinates in --fixed-point-file are in "
"X, Y, Z form, not Z, Y, X (e.g. the output of nuggt)",
action="store_true",
default=False)
parser.add_argument("--aligned-file",
help="Path to the alignment image file to be written.",
default=None)
parser.add_argument("--final-grid-spacing",
help="The spacing between voxels for the bspline grid",
default=None)
parser.add_argument("--transform-parameters-folder",
help="sitk-align will save the TransformParameters.txt"
" files in this directory if specified.",
default=None)
return parser.parse_args()
def getParameterMap(rigid=True, affine=True, bspline=True):
parameterMapVector = sitk.VectorOfParameterMap()
if rigid:
rigidMap = sitk.GetDefaultParameterMap("rigid")
rigidMap['MaximumNumberOfIterations'] = ['1000']
rigidMap['NumberOfHistogramBins'] = ['8','16','32']
rigidMap['NumberOfResolutions'] = ['6']
rigidMap['MaximumStepLength'] = ['0.5']
rigidMap['Registration'] = ['MultiMetricMultiResolutionRegistration']
rigidMap['Interpolator'] = ['BSplineInterpolator']
parameterMapVector.append(rigidMap)
if affine:
affineMap = sitk.GetDefaultParameterMap("affine")
affineMap['MaximumNumberOfIterations'] = ['1000']
affineMap['Registration'] = ['MultiMetricMultiResolutionRegistration']
affineMap['Interpolator'] = ['BSplineInterpolator']
affineMap['BSplineInterpolationOrder'] = ['3']
affineMap['FinalBSplineInterpolationOrder'] = ['3']
affineMap['NumberOfHistogramBins'] = ['4','8','16','32']
affineMap['NumberOfResolutions'] = ['5']
affineMap['MaximumStepLength'] = ['0.5']
parameterMapVector.append(affineMap)
if bspline:
bsplineMap = sitk.GetDefaultParameterMap("bspline")
bsplineMap['MaximumNumberOfIterations'] = ['5000']
bsplineMap['Registration'] = ['MultiMetricMultiResolutionRegistration']
bsplineMap['Interpolator'] = ['BSplineInterpolator']
bsplineMap['BSplineInterpolationOrder'] = ['3']
# increasing make atlas deform more, decrease deform less. should be odd numbers from 3
bsplineMap['FinalBSplineInterpolationOrder'] = ['3']
# increasing make atlas deform more, decrease deform less. should be odd numbers from 3
#bsplineMap['FinalGridSpacingInVoxels'] = ['8']
bsplineMap['FinalGridSpacingInVoxels'] = FINAL_GRID_SPACING_IN_VOXELS
# increasing make atlas deform less, decrease deform more., current issue might be the gridspacing issue
bsplineMap['NumberOfHistogramBins'] = ['4','8','16','32','64']
bsplineMap['NumberOfResolutions'] = ['6']
bsplineMap['MaximumStepLength'] = ['1']
# default : 1
bsplineMap['ResultImagePixelType'] = ['int']
bsplineMap['Metric0Weight'] = ['4']
bsplineMap['Metric1Weight'] = ['1000']
bsplineMap['Metric2Weight'] = ['1']
bsplineMap.erase('FinalGridSpacingInPhysicalUnits')
bsplineMap.erase('GridSpacingSchedule')
parameterMapVector.append(bsplineMap)
return parameterMapVector
def align(fixed_image, moving_image, aligned_image_path,
transform_parameter_folder = None):
"""Align the files
:param fixed_image: the SimpleITK image for the fixed image
:param moving_path: the SimpleITK moving image
:param aligned_image_path: path to write the image after alignment or
None if user does not want the image
:param transform_parameter_folder: where to store the transform
parameter files (this is a side-effect of running
ElastixImageFilter.Execute in the transfer_parameter_folder directory)
:returns: a transform map
"""
selx = sitk.ElastixImageFilter()
parameterMapVector = getParameterMap(False,True,True)
selx.SetParameterMap(parameterMapVector)
selx.SetFixedImage(fixed_image)
selx.SetMovingImage(moving_image)
curdir = os.path.abspath(os.getcwd())
try:
if transform_parameter_folder is not None:
os.chdir(transform_parameter_folder)
selx.Execute()
finally:
os.chdir(curdir)
if aligned_image_path is not None:
sitk.WriteImage(selx.GetResultImage(), aligned_image_path)
return selx.GetTransformParameterMap()
def write_point_set(filename, points):
"""Write a point set file, the way Elastix wants
The format of the file:
"point" or "index"
# of points
x y z
...
:param filename: Write to this file
:param points: an Nx3 array of points to write
"""
with open(filename, "w") as fd:
fd.write("point\n")
fd.write("%d\n" % len(points))
for z, y, x in points:
fd.write("%f %f %f\n" % (x, y, z))
def read_point_set(filename):
"""Read the point set file that's the output of the transformation
:param filename: The location of the point set file
:returns: an Nx3 array of the output points
"""
pattern = \
"%s\\s*=\\s*\\[\\s*(-?\\d+.?\\d*)\\s+(-?\\d+.?\\d*)\\s+(-?\\d+.?\\d*)\\s*\\]"
outpoints = []
with open(filename) as fd:
for line in fd:
match = re.search(pattern % "OutputPoint", line)
outpoints.append(list(reversed([float(_) for _ in match.groups()])))
return outpoints
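# The regular expression above pulls the coordinate triple out of Transformix
# output lines that contain a section of (roughly) this shape, and the values are
# returned reversed into z, y, x order:
#
#   ... ; OutputPoint = [ 12.3 45.6 78.9 ] ; ...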
def transform(points, moving_image, transform_parameter_map):
"""Transform the points in the fixed coordinate space to moving
:param points: Points in the fixed coordinate space
:param moving_image: The moving image as loaded by SimpleITK (needed
by the transformation to find the image dimensions)
:param transform_parameter_map: The transform parameter map produced
by ElastixImageFilter after running.
:returns: the point coordinates in the moving coordinate space
"""
temp_dir = tempfile.mkdtemp()
try:
fixed_point_set_path = os.path.join(temp_dir, "fixed_points.txt")
write_point_set(fixed_point_set_path, points)
tif = sitk.TransformixImageFilter()
tif.SetTransformParameterMap(transform_parameter_map)
tif.SetFixedPointSetFileName(fixed_point_set_path)
tif.SetMovingImage(moving_image)
tif.SetOutputDirectory(temp_dir)
tif.LogToConsoleOn()
tif.Execute()
output_path = os.path.join(temp_dir, "outputpoints.txt")
out_a = np.memmap(output_path, np.uint8, mode="r")
shape = (len(points), len(points[0]))
result = np.zeros(shape, np.float32)
parse_pts_file(out_a, result)
return result[:,::-1]
finally:
shutil.rmtree(temp_dir)
def main():
global FINAL_GRID_SPACING_IN_VOXELS
args = parse_args()
if args.final_grid_spacing is not None:
FINAL_GRID_SPACING_IN_VOXELS = args.final_grid_spacing.split(",")
fixed_image = sitk.ReadImage(args.fixed_file)
moving_image = sitk.ReadImage(args.moving_file)
aligned_file = args.aligned_file
fixed_point_file = args.fixed_point_file
alignment_point_file = args.alignment_point_file
transform_pm = align(fixed_image, moving_image, aligned_file,
args.transform_parameters_folder)
if alignment_point_file is not None:
with open(fixed_point_file) as fd:
points = json.load(fd)
if args.xyz:
points = [_[::-1] for _ in points]
out_points = transform(points, moving_image, transform_pm)
out_points = out_points.astype(float).tolist()
with open(alignment_point_file, "w") as fd:
json.dump(dict(reference=points,
moving=out_points), fd, indent=2)
if __name__=="__main__":
main()
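# Example invocation (a sketch; file names are hypothetical and the entry point is
# assumed to be exposed as "sitk-align", matching the help text above):
#
#   sitk-align --moving-file moving.tiff --fixed-file atlas.tiff \
#              --fixed-point-file atlas_points.json \
#              --alignment-point-file init_alignment.json \
#              --aligned-file aligned.tiff \
#              --transform-parameters-folder ./elastix_out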
| [
"argparse.ArgumentParser",
"SimpleITK.ElastixImageFilter",
"numpy.memmap",
"SimpleITK.VectorOfParameterMap",
"os.path.join",
"os.getcwd",
"os.chdir",
"json.load",
"numpy.zeros",
"SimpleITK.GetDefaultParameterMap",
"tempfile.mkdtemp",
"SimpleITK.TransformixImageFilter",
"shutil.rmtree",
"SimpleITK.ReadImage",
"re.search"
] | [((360, 385), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (383, 385), False, 'import argparse\n'), ((1912, 1939), 'SimpleITK.VectorOfParameterMap', 'sitk.VectorOfParameterMap', ([], {}), '()\n', (1937, 1939), True, 'import SimpleITK as sitk\n'), ((5004, 5029), 'SimpleITK.ElastixImageFilter', 'sitk.ElastixImageFilter', ([], {}), '()\n', (5027, 5029), True, 'import SimpleITK as sitk\n'), ((7114, 7132), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (7130, 7132), False, 'import tempfile\n'), ((8110, 8141), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['args.fixed_file'], {}), '(args.fixed_file)\n', (8124, 8141), True, 'import SimpleITK as sitk\n'), ((8161, 8193), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['args.moving_file'], {}), '(args.moving_file)\n', (8175, 8193), True, 'import SimpleITK as sitk\n'), ((1973, 2009), 'SimpleITK.GetDefaultParameterMap', 'sitk.GetDefaultParameterMap', (['"""rigid"""'], {}), "('rigid')\n", (2000, 2009), True, 'import SimpleITK as sitk\n'), ((2439, 2476), 'SimpleITK.GetDefaultParameterMap', 'sitk.GetDefaultParameterMap', (['"""affine"""'], {}), "('affine')\n", (2466, 2476), True, 'import SimpleITK as sitk\n'), ((3034, 3072), 'SimpleITK.GetDefaultParameterMap', 'sitk.GetDefaultParameterMap', (['"""bspline"""'], {}), "('bspline')\n", (3061, 3072), True, 'import SimpleITK as sitk\n'), ((5237, 5248), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5246, 5248), False, 'import os\n'), ((5403, 5419), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (5411, 5419), False, 'import os\n'), ((7173, 7215), 'os.path.join', 'os.path.join', (['temp_dir', '"""fixed_points.txt"""'], {}), "(temp_dir, 'fixed_points.txt')\n", (7185, 7215), False, 'import os\n'), ((7284, 7313), 'SimpleITK.TransformixImageFilter', 'sitk.TransformixImageFilter', ([], {}), '()\n', (7311, 7313), True, 'import SimpleITK as sitk\n'), ((7590, 7632), 'os.path.join', 'os.path.join', (['temp_dir', '"""outputpoints.txt"""'], {}), "(temp_dir, 'outputpoints.txt')\n", (7602, 7632), False, 'import os\n'), ((7649, 7691), 'numpy.memmap', 'np.memmap', (['output_path', 'np.uint8'], {'mode': '"""r"""'}), "(output_path, np.uint8, mode='r')\n", (7658, 7691), True, 'import numpy as np\n'), ((7755, 7782), 'numpy.zeros', 'np.zeros', (['shape', 'np.float32'], {}), '(shape, np.float32)\n', (7763, 7782), True, 'import numpy as np\n'), ((7872, 7895), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {}), '(temp_dir)\n', (7885, 7895), False, 'import shutil\n'), ((5322, 5358), 'os.chdir', 'os.chdir', (['transform_parameter_folder'], {}), '(transform_parameter_folder)\n', (5330, 5358), False, 'import os\n'), ((6442, 6482), 're.search', 're.search', (["(pattern % 'OutputPoint')", 'line'], {}), "(pattern % 'OutputPoint', line)\n", (6451, 6482), False, 'import re\n'), ((8559, 8572), 'json.load', 'json.load', (['fd'], {}), '(fd)\n', (8568, 8572), False, 'import json\n')] |
from pathlib import Path
import pygame
from janzeng.entity import Entity
PIVOTS = [
"topleft",
"topcenter",
"topright",
"midleft",
"center",
"midright",
"bottomleft",
"bottomcenter",
"bottomright",
]
class Sprite(Entity):
def __init__(self, *, image, pos, pivot="bottomcenter", alpha=True):
super().__init__()
if isinstance(image, Path):
self.image = pygame.image.load(image)
elif isinstance(image, str):
self.image = pygame.image.load(Path(image))
elif isinstance(image, pygame.Surface):
self.image = image
else:
raise TypeError(f"Image must be Path-object or string, got {type(image)}")
if alpha:
self.image = self.image.convert_alpha()
else:
self.image = self.image.convert()
self.pivot = pivot
self.rect = self.image.get_rect(**{pivot: pos})
self._position = pygame.Vector2(pos)
@property
def position(self):
return self._position
@position.setter
def position(self, value):
self._position = value
setattr(self.rect, self.pivot, (int(value[0]), int(value[1])))
def render(self, surface):
surface.blit(self.image, self.rect)
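# Usage sketch (hypothetical asset path; requires an initialised pygame display so
# that convert()/convert_alpha() can succeed):
#
#   player = Sprite(image=Path("assets/player.png"), pos=(100, 200), pivot="center")
#   player.position = pygame.Vector2(120, 200)   # moves the rect through the pivot
#   player.render(screen)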
| [
"pygame.image.load",
"pygame.Vector2",
"pathlib.Path"
] | [((960, 979), 'pygame.Vector2', 'pygame.Vector2', (['pos'], {}), '(pos)\n', (974, 979), False, 'import pygame\n'), ((424, 448), 'pygame.image.load', 'pygame.image.load', (['image'], {}), '(image)\n', (441, 448), False, 'import pygame\n'), ((529, 540), 'pathlib.Path', 'Path', (['image'], {}), '(image)\n', (533, 540), False, 'from pathlib import Path\n')] |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
from pages.dashboard import DashboardPage
class TestPagination:
SEARCH_TERM = u'firefox'
@pytest.mark.nondestructive
def test_search_pagination(self, mozwebqa):
dashboard_pg = DashboardPage(mozwebqa)
dashboard_pg.go_to_dashboard_page()
# Set the date range to 2013-01-01 -> today so that we're more
# likely to have so many messages in the results that it
# paginates. Otherwise it might not paginate on stage or local
# environments.
dashboard_pg.set_date_range('2013-01-01')
dashboard_pg.search_for(self.SEARCH_TERM)
# Check the total message count. If it's less than 50 (two
# pages worth), then we will fail with a helpful message.
assert dashboard_pg.total_message_count >= 50, 'Not enough data to test. Add more data.'
assert dashboard_pg.is_older_messages_link_visible is True
assert dashboard_pg.is_newer_messages_link_visible is False
assert dashboard_pg.older_messages_link == 'Older Messages'
dashboard_pg.click_older_messages()
assert dashboard_pg.search_term_from_url == self.SEARCH_TERM
assert dashboard_pg.is_older_messages_link_visible is True
assert dashboard_pg.is_newer_messages_link_visible is True
assert dashboard_pg.older_messages_link == 'Older Messages'
assert dashboard_pg.newer_messages_link == 'Newer Messages'
assert dashboard_pg.page_from_url == 2
dashboard_pg.click_newer_messages()
assert dashboard_pg.search_term_from_url == self.SEARCH_TERM
assert dashboard_pg.is_older_messages_link_visible is True
assert dashboard_pg.is_newer_messages_link_visible is False
assert dashboard_pg.older_messages_link == 'Older Messages'
assert dashboard_pg.page_from_url == 1
| [
"pages.dashboard.DashboardPage"
] | [((414, 437), 'pages.dashboard.DashboardPage', 'DashboardPage', (['mozwebqa'], {}), '(mozwebqa)\n', (427, 437), False, 'from pages.dashboard import DashboardPage\n')] |
import math
catop = int(input('Enter the value of the opposite leg: '))
catad = int(input('Enter the value of the adjacent leg: '))
hip = math.sqrt(catop**2 + catad**2)
print('The value of the hypotenuse is: ', hip)
| [
"math.sqrt"
] | [((144, 178), 'math.sqrt', 'math.sqrt', (['(catop ** 2 + catad ** 2)'], {}), '(catop ** 2 + catad ** 2)\n', (153, 178), False, 'import math\n')] |
from configparser import ConfigParser
from x_com import DB_CONFIG
def config( filename=None, section='postgresql' ):
filename = filename if filename is not None else DB_CONFIG
parser = ConfigParser()
parser.read(filename)
db = {}
if parser.has_section(section):
params = parser.items(section)
for param in params:
db[param[0]] = param[1]
else:
raise FileNotFoundError('Section {0} not found in the {1} file'.format(section, filename))
return db
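# Expected layout of the configuration file referenced by x_com.DB_CONFIG, and a
# usage sketch (values are hypothetical):
#
#   [postgresql]
#   host=localhost
#   database=mydb
#   user=postgres
#   password=secret
#
#   params = config()   # -> {'host': 'localhost', 'database': 'mydb', ...}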
| [
"configparser.ConfigParser"
] | [((197, 211), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (209, 211), False, 'from configparser import ConfigParser\n')] |
import matplotlib.pyplot as plt
import numpy as np
import time
class PlotResults:
"""
Class to plot the results.
"""
def plot_results(self, data1, data2, label1, label2, filename):
"""
This method receives two lists of data point (data1 and data2) and plots
a scatter plot with the information. The lists store statistics about individual search
problems such as the number of nodes a search algorithm needs to expand to solve the problem.
The function assumes that data1 and data2 have the same size.
label1 and label2 are the labels of the axes of the scatter plot.
filename is the name of the file in which the plot will be saved.
"""
_, ax = plt.subplots()
ax.scatter(data1, data2, s=100, c="g", alpha=0.5, cmap=plt.cm.coolwarm, zorder=10)
lims = [
np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes
]
ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
ax.set_aspect('equal')
ax.set_xlim(lims)
ax.set_ylim(lims)
plt.xlabel(label1)
plt.ylabel(label2)
plt.grid()
plt.savefig(filename)
class Grid:
"""
Class to represent an assignment of values to the 81 variables defining a Sudoku puzzle.
Variable _cells stores a matrix with 81 entries, one for each variable in the puzzle.
Each entry of the matrix stores the domain of a variable. Initially, the domains of variables
that need to have their values assigned are 123456789; the other domains are limited to the value
initially assigned on the grid. Backtracking search and AC3 reduce the the domain of the variables
as they proceed with search and inference.
"""
def __init__(self):
self._cells = []
self._complete_domain = "123456789"
self._width = 9
def copy(self):
"""
Returns a copy of the grid.
"""
copy_grid = Grid()
copy_grid._cells = [row.copy() for row in self._cells]
return copy_grid
def get_cells(self):
"""
Returns the matrix with the domains of all variables in the puzzle.
"""
return self._cells
def get_width(self):
"""
Returns the width of the grid.
"""
return self._width
def read_file(self, string_puzzle):
"""
Reads a Sudoku puzzle from string and initializes the matrix _cells.
This is a valid input string:
4.....8.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......
This is translated into the following Sudoku grid:
- - - - - - - - - - - - -
| 4 . . | . . . | 8 . 5 |
| . 3 . | . . . | . . . |
| . . . | 7 . . | . . . |
- - - - - - - - - - - - -
| . 2 . | . . . | . 6 . |
| . . . | . 8 . | 4 . . |
| . . . | . 1 . | . . . |
- - - - - - - - - - - - -
| . . . | 6 . 3 | . 7 . |
| 5 . . | 2 . . | . . . |
| 1 . 4 | . . . | . . . |
- - - - - - - - - - - - -
"""
i = 0
row = []
for p in string_puzzle:
if p == '.':
row.append(self._complete_domain)
else:
row.append(p)
i += 1
if i % self._width == 0:
self._cells.append(row)
row = []
def print(self):
"""
Prints the grid on the screen. Example:
- - - - - - - - - - - - -
| 4 . . | . . . | 8 . 5 |
| . 3 . | . . . | . . . |
| . . . | 7 . . | . . . |
- - - - - - - - - - - - -
| . 2 . | . . . | . 6 . |
| . . . | . 8 . | 4 . . |
| . . . | . 1 . | . . . |
- - - - - - - - - - - - -
| . . . | 6 . 3 | . 7 . |
| 5 . . | 2 . . | . . . |
| 1 . 4 | . . . | . . . |
- - - - - - - - - - - - -
"""
for _ in range(self._width + 4):
print('-', end=" ")
print()
for i in range(self._width):
print('|', end=" ")
for j in range(self._width):
if len(self._cells[i][j]) == 1:
print(self._cells[i][j], end=" ")
elif len(self._cells[i][j]) > 1:
print('.', end=" ")
else:
print(';', end=" ")
if (j + 1) % 3 == 0:
print('|', end=" ")
print()
if (i + 1) % 3 == 0:
for _ in range(self._width + 4):
print('-', end=" ")
print()
print()
def print_domains(self):
"""
Print the domain of each variable for a given grid of the puzzle.
"""
for row in self._cells:
print(row)
def is_solved(self):
"""
Returns True if the puzzle is solved and False otherwise.
"""
for i in range(self._width):
for j in range(self._width):
if len(self._cells[i][j]) != 1:
return False
return True
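# Typical use of Grid, reusing the example string from read_file's docstring:
#
#   grid = Grid()
#   grid.read_file('4.....8.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......')
#   grid.print()           # pretty-prints the puzzle
#   grid.print_domains()   # shows the 9x9 matrix of candidate digits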
class VarSelector:
"""
Interface for selecting variables in a partial assignment.
Extend this class when implementing a new heuristic for variable selection.
"""
def select_variable(self, grid):
pass
class FirstAvailable(VarSelector):
"""
Naïve method for selecting variables; simply returns the first variable encountered whose domain is larger than one.
"""
def select_variable(self, grid):
# Implement here the first available heuristic
# Sequentially loop all the cells until finding an unassigned variable
for i in range(grid.get_width()):
for j in range(grid.get_width()):
if len(grid.get_cells()[i][j]) > 1:
return i, j
        return -1, -1  # All variables have been assigned, so return (-1, -1)
class MRV(VarSelector):
"""
Implements the MRV heuristic, which returns one of the variables with smallest domain.
"""
def select_variable(self, grid):
# Implement here the mrv heuristic
minimum_remaining_value = float("inf") # infinity
minimum_remaining_value_position = (-1, -1)
# Loop all the cells, find the one with minimum length
for i in range(grid.get_width()):
for j in range(grid.get_width()):
l = len(grid.get_cells()[i][j])
if (l != 1) and (l < minimum_remaining_value):
minimum_remaining_value = len(grid.get_cells()[i][j])
minimum_remaining_value_position = (i, j)
return minimum_remaining_value_position
class AC3:
"""
This class implements the methods needed to run AC3 on Sudoku.
"""
def remove_domain_row(self, grid, row, column):
"""
Given a matrix (grid) and a cell on the grid (row and column) whose domain is of size 1 (i.e., the variable has its
value assigned), this method removes the value of (row, column) from all variables in the same row.
"""
variables_assigned = []
for j in range(grid.get_width()):
if j != column:
new_domain = grid.get_cells()[row][j].replace(grid.get_cells()[row][column], '')
if len(new_domain) == 0:
return None, True
if len(new_domain) == 1 and len(grid.get_cells()[row][j]) > 1:
variables_assigned.append((row, j))
grid.get_cells()[row][j] = new_domain
return variables_assigned, False
def remove_domain_column(self, grid, row, column):
"""
Given a matrix (grid) and a cell on the grid (row and column) whose domain is of size 1 (i.e., the variable has its
value assigned), this method removes the value of (row, column) from all variables in the same column.
"""
variables_assigned = []
for j in range(grid.get_width()):
if j != row:
new_domain = grid.get_cells()[j][column].replace(grid.get_cells()[row][column], '')
if len(new_domain) == 0:
return None, True
if len(new_domain) == 1 and len(grid.get_cells()[j][column]) > 1:
variables_assigned.append((j, column))
grid.get_cells()[j][column] = new_domain
return variables_assigned, False
def remove_domain_unit(self, grid, row, column):
"""
Given a matrix (grid) and a cell on the grid (row and column) whose domain is of size 1 (i.e., the variable has its
value assigned), this method removes the value of (row, column) from all variables in the same unit.
"""
variables_assigned = []
row_init = (row // 3) * 3
column_init = (column // 3) * 3
for i in range(row_init, row_init + 3):
for j in range(column_init, column_init + 3):
if i == row and j == column:
continue
new_domain = grid.get_cells()[i][j].replace(grid.get_cells()[row][column], '')
if len(new_domain) == 0:
return None, True
if len(new_domain) == 1 and len(grid.get_cells()[i][j]) > 1:
variables_assigned.append((i, j))
grid.get_cells()[i][j] = new_domain
return variables_assigned, False
def pre_process_consistency(self, grid):
"""
This method enforces arc consistency for the initial grid of the puzzle.
The method runs AC3 for the arcs involving the variables whose values are
already assigned in the initial grid.
"""
# Implement here the code for making the CSP arc consistent as a pre-processing step; this method should be called once before search
Q = []
for i in range(grid.get_width()):
for j in range(grid.get_width()):
if len(grid.get_cells()[i][j]) == 1:
Q.append((i, j))
temp = self.consistency(grid, Q)
if (temp == "failure"):
return None, "failure"
return grid, "success"
def consistency(self, grid, Q):
"""
This is a domain-specific implementation of AC3 for Sudoku.
It keeps a set of variables to be processed (Q) which is provided as input to the method.
Since this is a domain-specific implementation, we don't need to maintain a graph and a set
of arcs in memory. We can store in Q the cells of the grid and, when processing a cell, we
ensure arc consistency of all variables related to this cell by removing the value of
        the cell from all variables in its column, row, and unit.
For example, if the method is used as a preprocessing step, then Q is initialized with
all cells that start with a number on the grid. This method ensures arc consistency by
removing from the domain of all variables in the row, column, and unit the values of
the cells given as input. Like the general implementation of AC3, the method adds to
        Q all variables that have their values assigned during the propagation of the constraints.
        The method returns "failure" if AC3 detects that the problem can't be solved with the
        current partial assignment; it returns "success" otherwise.
"""
# Implement here the domain-dependent version of AC3.
while len(Q) != 0:
row, column = Q.pop()
variables_assigned1, failure1 = self.remove_domain_row(grid, row, column)
variables_assigned2, failure2 = self.remove_domain_column(grid, row, column)
variables_assigned3, failure3 = self.remove_domain_unit(grid, row, column)
if failure1 or failure2 or failure3:
return "failure"
variables_assigned = variables_assigned1 + variables_assigned2 + variables_assigned3
for t in variables_assigned:
if t not in Q:
Q.append(t)
return "success"
class Backtracking:
"""
Class that implements backtracking search for solving CSPs.
"""
def search(self, grid, var_selector):
"""
Implements backtracking search with inference.
"""
# Backtracking without inference
# Checking whether the tree is complete or not
complete = True
for i in range(grid.get_width()):
for j in range(grid.get_width()):
if len(grid.get_cells()[i][j]) > 1:
complete = False
break
if not complete:
break
if complete:
return grid
row, col = var_selector.select_variable(grid) # Variable to be assigned
for d in grid.get_cells()[row][col]:
if not self.consistent_check(grid, d, row, col):
continue
# If consistent:
copy_grid = grid.copy() # Copy the grid
copy_grid.get_cells()[row][col] = d # Assigned value to the copy version
rb = self.search(copy_grid, var_selector)
if rb != "failure":
return rb
return "failure"
def search_AC3(self, grid, var_selector):
# Backtracking with AC3 inference
# Using the helper function to do all the work
ac3 = AC3()
ac3.pre_process_consistency(grid) # Pre-process the grid before running Backtracking
return self.helper_search_AC3(grid, var_selector)
def helper_search_AC3(self, grid, var_selector):
# Backtracking with AC3 inference
# Checking whether the tree is complete or not
complete = True
for i in range(grid.get_width()):
for j in range(grid.get_width()):
if len(grid.get_cells()[i][j]) > 1:
complete = False
break
if not complete:
break
if complete:
return grid
row, col = var_selector.select_variable(grid) # Variable to be assigned
for d in grid.get_cells()[row][col]:
if not self.consistent_check(grid, d, row, col):
continue
# If consistent:
copy_grid = grid.copy() # Copy the grid
copy_grid.get_cells()[row][col] = d # Assigned value to the copy version
# Find unselected variables in copy_grid, then run AC3 on it
ac3 = AC3()
ri = ac3.consistency(copy_grid, [(row, col)]) # Run AC3 inference
if (ri != "failure"):
rb = self.helper_search_AC3(copy_grid, var_selector)
if rb != "failure":
return rb
return "failure"
def consistent_check(self, grid, d, row, col):
# Checking the consistency if we assign value d to position (row, col) in the matrix
# Strategy: Check consistency in row and col, then check consistency in the unit
# Check consistency within column and row
for i in range(grid.get_width()):
temp1 = grid.get_cells()[row][i] # Iterate column
temp2 = grid.get_cells()[i][col] # Iterate row
if len(temp1) == 1 and temp1 == d and i != col:
return False
if len(temp2) == 1 and temp2 == d and i != row:
return False
unit_row_position = (-1, -1)
# unit_row_position is the range of row of the unit var is in
# (-1, 1) is just a dummy value
if (row % 3 == 0):
unit_row_position = (row, row + 2)
elif (row % 3 == 1):
unit_row_position = (row - 1, row + 1)
else:
unit_row_position = (row - 2, row)
unit_col_position = (-1, -1)
# unit_col_position is the range of row of the unit var is in
# (-1, 1) is just a dummy value
if (col % 3 == 0):
unit_col_position = (col, col + 2)
elif (col % 3 == 1):
unit_col_position = (col - 1, col + 1)
else:
unit_col_position = (col - 2, col)
# Check consistency within a unit
for i in range(unit_row_position[0], unit_row_position[1] + 1):
for j in range(unit_col_position[0], unit_col_position[1] + 1):
temp = grid.get_cells()[i][j]
if len(temp) == 1 and temp == d and (i != row or j != col):
return False
return True
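def solve_single_puzzle(puzzle_string):
    # Illustrative sketch (not called by the benchmark script below): solves one
    # puzzle string with AC3 pre-processing plus backtracking using the MRV heuristic.
    # Returns the solved Grid, or the string "failure" if no solution is found.
    grid = Grid()
    grid.read_file(puzzle_string)
    result = Backtracking().search_AC3(grid, MRV())
    if result != "failure":
        result.print()
    return result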
file = open('top95.txt', 'r')
problems = file.readlines()
running_time_mrv = []
running_time_first_available = []
i = 1
for p in problems:
print("\n\nSudoku", i)
g = Grid()
g.read_file(p)
g.print()
# MRV
b = Backtracking()
mrv = MRV()
start = time.time()
temp = b.search_AC3(g, mrv)
end = time.time()
running_time_mrv.append(end - start)
temp.print()
print("Is solved:", temp.is_solved())
# FA
fa = FirstAvailable()
start = time.time()
temp = b.search_AC3(g, fa)
end = time.time()
running_time_first_available.append(end - start)
temp.print()
print("Is solved:", temp.is_solved())
i += 1
plotter = PlotResults()
plotter.plot_results(running_time_mrv, running_time_first_available, "Running Time Backtracking (MRV)", "Running Time Backtracking (FA)", "running_time")
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"time.time",
"matplotlib.pyplot.subplots"
] | [((17109, 17120), 'time.time', 'time.time', ([], {}), '()\n', (17118, 17120), False, 'import time\n'), ((17163, 17174), 'time.time', 'time.time', ([], {}), '()\n', (17172, 17174), False, 'import time\n'), ((17323, 17334), 'time.time', 'time.time', ([], {}), '()\n', (17332, 17334), False, 'import time\n'), ((17376, 17387), 'time.time', 'time.time', ([], {}), '()\n', (17385, 17387), False, 'import time\n'), ((750, 764), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (762, 764), True, 'import matplotlib.pyplot as plt\n'), ((1176, 1194), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['label1'], {}), '(label1)\n', (1186, 1194), True, 'import matplotlib.pyplot as plt\n'), ((1203, 1221), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['label2'], {}), '(label2)\n', (1213, 1221), True, 'import matplotlib.pyplot as plt\n'), ((1230, 1240), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1238, 1240), True, 'import matplotlib.pyplot as plt\n'), ((1249, 1270), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (1260, 1270), True, 'import matplotlib.pyplot as plt\n')] |
import sys
import codecs
gold_file=sys.argv[1]
pred_file=sys.argv[2]
def read_data(fname):
for line in codecs.open(fname):
line = line.strip().split()
tagged = [x.rsplit("/",1) for x in line]
yield tagged
def normalize_bio(tagged_sent):
last_bio, last_type = "O","O"
normalized = []
for word, tag in tagged_sent:
if tag == "O": tag = "O-O"
bio,typ = tag.split("-",1)
if bio=="I" and last_bio=="O": bio="B"
if bio=="I" and last_type!=typ: bio="B"
normalized.append((word,(bio,typ)))
last_bio,last_type=bio,typ
return normalized
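def _normalize_bio_example():
    # Illustrative sketch (not used by the evaluation below): normalize_bio promotes
    # stray I- tags that follow "O" or a different entity type to B- tags.
    sent = [("John", "I-PER"), ("Smith", "I-PER"), ("runs", "O")]
    # -> [("John", ("B", "PER")), ("Smith", ("I", "PER")), ("runs", ("O", "O"))]
    return normalize_bio(sent)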
def compare_accuracy(gold, pred):
    assert(len(gold)==len(pred))
correct = 0.0
total = 0.0
for gold_sent, pred_sent in zip(gold, pred):
assert(len(gold_sent)==len(pred_sent))
gws = [w for w,t in gold_sent]
pws = [w for w,t in pred_sent]
assert(gws==pws)
gtags = [t for w,t in gold_sent]
ptags = [t for w,t in pred_sent]
        correct += sum([1 if g==p else 0 for g,p in zip(gtags, ptags)])
total += len(gold_sent)
return correct/total
def get_entities(sent):
ent=[]
for i,(word,tag) in enumerate(sent):
bio,typ=tag
if bio=="B":
if ent: yield tuple(ent)
ent=[]
ent.append(i)
ent.append(typ)
ent.append(word)
if bio=="I":
ent.append(word)
if bio=="O":
if ent: yield tuple(ent)
ent=[]
if ent: yield tuple(ent)
if __name__=='__main__':
gold_data = [normalize_bio(tagged) for tagged in read_data(gold_file)]
pred_data = [normalize_bio(tagged) for tagged in read_data(pred_file)]
assert(len(gold_data)==len(pred_data))
gold_entities = set()
for i,sent in enumerate(gold_data):
for entity in get_entities(sent):
gold_entities.add((i,entity))
pred_entities = set()
for i,sent in enumerate(pred_data):
for entity in get_entities(sent):
pred_entities.add((i,entity))
print
acc = compare_accuracy(gold_data, pred_data)
print ("Accuracy:", acc)
print
prec = len(gold_entities.intersection(pred_entities)) / float(len(pred_entities))
rec = len(gold_entities.intersection(pred_entities)) / float(len(gold_entities))
print("All-types \tPrec:%s Rec:%s" % (prec, rec))
types = set([e[1][1] for e in gold_entities]) - set(["O"])
for t in types:
gents = set([e for e in gold_entities if e[1][1]==t])
pents = set([e for e in pred_entities if e[1][1]==t])
prec = len(gents.intersection(pents)) / float(len(pents))
rec = len(gents.intersection(pents)) / float(len(gents))
print("%10s \tPrec:%s Rec:%s" % (t, prec, rec))
| [
"codecs.open"
] | [((108, 126), 'codecs.open', 'codecs.open', (['fname'], {}), '(fname)\n', (119, 126), False, 'import codecs\n')] |
import checkers
env = checkers.CheckersAgent()
state = env.observation()
actions = env.possible_actions(state)
s, r, done, info = env.step(actions[0])
| [
"checkers.CheckersAgent"
] | [((105, 129), 'checkers.CheckersAgent', 'checkers.CheckersAgent', ([], {}), '()\n', (127, 129), False, 'import checkers\n')] |
from rest_framework import serializers
from .models import Question, Choice
class ChoiceSerializer(serializers.ModelSerializer):
class Meta:
model = Choice
fields = ["id", "message"]
class QuestionSerializer(serializers.ModelSerializer):
choices = ChoiceSerializer(many=True, read_only=True)
class Meta:
model = Question
fields = ["id", "enable", "message", "min_num_chosen", "max_num_chosen", "choices"]
class ChoiceCountSerializer(serializers.ModelSerializer):
count = serializers.IntegerField()
class Meta:
model = Choice
fields = ["id", "message", "count"]
| [
"rest_framework.serializers.IntegerField"
] | [((526, 552), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {}), '()\n', (550, 552), False, 'from rest_framework import serializers\n')] |
"""PPPIPAM's setup script."""
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pppipam', # Required
version='0.1.0', # Required
description="Poor person's Python IP Address Manager", # Required
long_description=long_description, # Optional
long_description_content_type='text/markdown', # Optional
url='https://github.com/ayharano/pppipam', # Optional
author='<NAME>', # Optional
author_email='<EMAIL>', # Optional
classifiers=[ # Optional
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Networking',
'Topic :: System :: Systems Administration',
],
keywords='ip address management ipv4 ipv6 network system administration', # Optional
packages=find_packages(exclude=['tests']), # Required
project_urls={ # Optional
'Bug Reports': 'https://github.com/ayharano/pppipam/issues',
'Source': 'https://github.com/ayharano/pppipam',
},
python_requires='>=3.7, <4', # Optional
)
| [
"os.path.dirname",
"os.path.join",
"distutils.core.find_packages"
] | [((197, 219), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (209, 219), False, 'from os import path\n'), ((232, 260), 'os.path.join', 'path.join', (['here', '"""README.md"""'], {}), "(here, 'README.md')\n", (241, 260), False, 'from os import path\n'), ((1408, 1440), 'distutils.core.find_packages', 'find_packages', ([], {'exclude': "['tests']"}), "(exclude=['tests'])\n", (1421, 1440), False, 'from distutils.core import setup, find_packages\n')] |
from django.contrib import admin
from .models import ProductAquaticPlantPackage, ProductAquaticPlant
# Register your models here.
class ProductAquaticPlantAdminInline(admin.TabularInline):
model = ProductAquaticPlant
extra = 1
@admin.register(ProductAquaticPlantPackage)
class ProductAquaticPlantPackageAdmin(admin.ModelAdmin):
inlines = (ProductAquaticPlantAdminInline,)
list_display = ('id', 'name')
pass
@admin.register(ProductAquaticPlant)
class ProductAquaticPlantAdmin(admin.ModelAdmin):
filter_horizontal = ('image',)
pass | [
"django.contrib.admin.register"
] | [((237, 279), 'django.contrib.admin.register', 'admin.register', (['ProductAquaticPlantPackage'], {}), '(ProductAquaticPlantPackage)\n', (251, 279), False, 'from django.contrib import admin\n'), ((430, 465), 'django.contrib.admin.register', 'admin.register', (['ProductAquaticPlant'], {}), '(ProductAquaticPlant)\n', (444, 465), False, 'from django.contrib import admin\n')] |
import logging
import sys
import time
from node.core.commands import CustomCommand
from node.core.database import is_replica_set_initialized
logger = logging.getLogger(__name__)
class Command(CustomCommand):
help = 'Check if replica set is initialized' # noqa: A003
def add_arguments(self, parser):
parser.add_argument('-w', '--wait', action='store_true')
parser.add_argument('-t', '--timeout', type=int, default=60, help='Timeout in seconds')
parser.add_argument('-p', '--period', type=int, default=1, help='Check period')
def handle(self, *args, **options):
expiration = time.time() + options['timeout']
should_wait = options['wait']
while True:
if is_replica_set_initialized():
self.write_success('Replica set is initialized')
break
if should_wait:
now = time.time()
if now < expiration:
self.write(f'Replica set is not initialized yet, waiting (ts: {now})..')
                    time.sleep(options['period'])
else:
self.write_error('Timed out')
sys.exit(1)
else:
self.write_error('Replica set is not initialized')
sys.exit(1)
| [
"logging.getLogger",
"time.sleep",
"sys.exit",
"time.time",
"node.core.database.is_replica_set_initialized"
] | [((152, 179), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (169, 179), False, 'import logging\n'), ((624, 635), 'time.time', 'time.time', ([], {}), '()\n', (633, 635), False, 'import time\n'), ((730, 758), 'node.core.database.is_replica_set_initialized', 'is_replica_set_initialized', ([], {}), '()\n', (756, 758), False, 'from node.core.database import is_replica_set_initialized\n'), ((898, 909), 'time.time', 'time.time', ([], {}), '()\n', (907, 909), False, 'import time\n'), ((1279, 1290), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1287, 1290), False, 'import sys\n'), ((1060, 1073), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1070, 1073), False, 'import time\n'), ((1166, 1177), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1174, 1177), False, 'import sys\n')] |
from pyspark.sql.types import *
def window(x, y, yerr, start, end) -> (
StructType(
[
StructField("x", ArrayType(FloatType())),
StructField("y", ArrayType(FloatType())),
StructField("yerr", ArrayType(FloatType())),
]
)
):
import numpy as np
x = np.array(x)
y = np.array(y)
yerr = np.array(yerr)
in_window = (x > start) & (x < end)
return {
"x" : x[in_window].tolist(),
"y" : y[in_window].tolist(),
"yerr" : yerr[in_window].tolist(),
}
def window_udf():
from fit_utils import make_udf_from_annotated_function
return make_udf_from_annotated_function(window)
def around_window(x, y, yerr, start, end, wiggle=0.5) -> (
StructType(
[
StructField("x", ArrayType(FloatType())),
StructField("y", ArrayType(FloatType())),
StructField("yerr", ArrayType(FloatType())),
]
)
):
import numpy as np
width = end - start
x = np.array(x)
y = np.array(y)
yerr = np.array(yerr)
min_x = start - wiggle * width
max_x = end + wiggle * width
in_window = (x > min_x) & (x < max_x)
return {
"x" : x[in_window].tolist(),
"y" : y[in_window].tolist(),
"yerr" : yerr[in_window].tolist(),
}
def around_window_udf(**kwargs):
from functools import partial
from fit_utils import make_udf_from_annotated_function
return make_udf_from_annotated_function(partial(around_window, **kwargs))
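if __name__ == "__main__":
    # Illustrative sketch: the plain Python function can be exercised without Spark.
    # Only samples whose x lies strictly inside (start, end) are kept.
    demo = window([0.5, 1.0, 1.5, 2.0], [1.0, 2.0, 3.0, 4.0], [0.1, 0.1, 0.1, 0.1], 0.75, 1.75)
    print(demo)  # {'x': [1.0, 1.5], 'y': [2.0, 3.0], 'yerr': [0.1, 0.1]}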
| [
"functools.partial",
"numpy.array",
"fit_utils.make_udf_from_annotated_function"
] | [((314, 325), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (322, 325), True, 'import numpy as np\n'), ((334, 345), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (342, 345), True, 'import numpy as np\n'), ((357, 371), 'numpy.array', 'np.array', (['yerr'], {}), '(yerr)\n', (365, 371), True, 'import numpy as np\n'), ((647, 687), 'fit_utils.make_udf_from_annotated_function', 'make_udf_from_annotated_function', (['window'], {}), '(window)\n', (679, 687), False, 'from fit_utils import make_udf_from_annotated_function\n'), ((1014, 1025), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1022, 1025), True, 'import numpy as np\n'), ((1034, 1045), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1042, 1045), True, 'import numpy as np\n'), ((1057, 1071), 'numpy.array', 'np.array', (['yerr'], {}), '(yerr)\n', (1065, 1071), True, 'import numpy as np\n'), ((1509, 1541), 'functools.partial', 'partial', (['around_window'], {}), '(around_window, **kwargs)\n', (1516, 1541), False, 'from functools import partial\n')] |
from query import FilterableQuery
class Update(FilterableQuery):
"""Implements an UPDATE query."""
template = 'UPDATE {table} SET {updates} {conditions}'
def __init__(self, table=None, **kwargs):
FilterableQuery.__init__(self, table)
self.updates = kwargs
def set(self, **kwargs):
"""Similar to Insert.set(), this will update a column's value."""
self.updates.update(kwargs)
return self
@property
def statement(self):
update_list = ['{} = {}'.format(k, self.placeholder) for k in self.updates.keys()]
return self.template.format(
table = self.table,
updates = ', '.join(update_list),
conditions = self.where_clause
).strip()
@property
def variables(self):
        return list(self.updates.values()) + list(self.conditions.values())
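def _update_example():
    # Illustrative sketch ('users' is a hypothetical table name): set() accumulates
    # column/value pairs and returns self for chaining. The final SQL text comes from
    # .statement, which also relies on the placeholder and WHERE clause supplied by
    # FilterableQuery (not shown in this module).
    query = Update('users').set(name='Alice', age=30)
    return query.updates  # {'name': 'Alice', 'age': 30}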
| [
"query.FilterableQuery.__init__"
] | [((220, 257), 'query.FilterableQuery.__init__', 'FilterableQuery.__init__', (['self', 'table'], {}), '(self, table)\n', (244, 257), False, 'from query import FilterableQuery\n')] |
from absl import app
from absl import flags
from camera_replay_operator import CameraReplayOperator
from detection_operator import DetectionOperator
from tracker_crt_operator import TrackerCRTOperator
from tracker_cv2_operator import TrackerCV2Operator
import erdos.graph
FLAGS = flags.FLAGS
flags.DEFINE_string('framework', 'ros',
'Execution framework to use: ros | ray.')
flags.DEFINE_integer('num_cameras', 1, 'Number of cameras.')
flags.DEFINE_bool('obj_detection', False,
'True to enable object detection operator')
flags.DEFINE_bool('obj_tracking', False,
'True to enable object tracking operator')
flags.DEFINE_string('tracker_type', 'cv2', 'Tracker type: cv2 | crt')
def main(argv):
graph = erdos.graph.get_current_graph()
camera_ops = []
for i in range(0, FLAGS.num_cameras, 1):
op_name = 'camera{}'.format(i)
camera_op = graph.add(
CameraReplayOperator,
name=op_name,
setup_args={'op_name': op_name})
camera_ops.append(camera_op)
if FLAGS.obj_detection:
obj_detector_op = graph.add(DetectionOperator, name='detection')
graph.connect(camera_ops, [obj_detector_op])
if FLAGS.obj_tracking:
tracker_op = None
if FLAGS.tracker_type == 'cv2':
tracker_op = graph.add(TrackerCV2Operator, name='tracker')
elif FLAGS.tracker_type == 'crt':
tracker_op = graph.add(TrackerCRTOperator, name='tracker')
graph.connect(camera_ops + [obj_detector_op], [tracker_op])
graph.execute(FLAGS.framework)
if __name__ == '__main__':
app.run(main)
| [
"absl.app.run",
"absl.flags.DEFINE_integer",
"absl.flags.DEFINE_string",
"absl.flags.DEFINE_bool"
] | [((295, 380), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""framework"""', '"""ros"""', '"""Execution framework to use: ros | ray."""'], {}), "('framework', 'ros',\n 'Execution framework to use: ros | ray.')\n", (314, 380), False, 'from absl import flags\n'), ((397, 457), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_cameras"""', '(1)', '"""Number of cameras."""'], {}), "('num_cameras', 1, 'Number of cameras.')\n", (417, 457), False, 'from absl import flags\n'), ((458, 547), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""obj_detection"""', '(False)', '"""True to enable object detection operator"""'], {}), "('obj_detection', False,\n 'True to enable object detection operator')\n", (475, 547), False, 'from absl import flags\n'), ((562, 649), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""obj_tracking"""', '(False)', '"""True to enable object tracking operator"""'], {}), "('obj_tracking', False,\n 'True to enable object tracking operator')\n", (579, 649), False, 'from absl import flags\n'), ((664, 733), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""tracker_type"""', '"""cv2"""', '"""Tracker type: cv2 | crt"""'], {}), "('tracker_type', 'cv2', 'Tracker type: cv2 | crt')\n", (683, 733), False, 'from absl import flags\n'), ((1672, 1685), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (1679, 1685), False, 'from absl import app\n')] |
"""Generate a log file from a folder of data files."""
import argparse
import csv
import logging
import os
import pathlib
logger = logging.getLogger(__name__)
def generate_log(folder):
"""Generate a log file for a folder of data.
Parameters
----------
folder : pathlib.Path
Folder containing data.
Returns
-------
log_path : pathlib.Path
New log file path.
"""
logger.info(f"Generating a log file for folder: {folder}")
log_path = folder.joinpath("log.log")
files = [
p
for p in folder.iterdir()
if p.suffix in [".liv1", ".liv2", ".mpp", ".voc", ".jsc", ".div1", ".div2"]
]
# sort by date modified to preserve measurement order
files.sort(key=os.path.getctime)
# for each data file, list the log metadata
log = []
for i, file in enumerate(files):
with open(file) as f:
csv_reader = csv.reader(f, delimiter="\t")
# each row of metadata is a tab-separated list of length 2
meta = [row for row in csv_reader if len(row) == 2]
# store the header from the first iteration only
if i == 0:
header = [x[0] for x in meta]
log.append(header)
data = [x[1] for x in meta]
log.append(data)
# the list of metadata to a tab separated file
with open(log_path, mode="w", newline="\n") as lf:
csv_writer = csv.writer(lf, delimiter="\t")
for entry in log:
csv_writer.writerow(entry)
logger.info(f"Generated new log file successfully: {log_path}")
return log_path
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get folder path")
parser.add_argument(
"--folder",
default=str(pathlib.Path.cwd()),
help="Absolute path to data folder",
)
args = parser.parse_args()
generate_log(pathlib.Path(args.folder))
| [
"logging.getLogger",
"argparse.ArgumentParser",
"pathlib.Path",
"pathlib.Path.cwd",
"csv.writer",
"csv.reader"
] | [((134, 161), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (151, 161), False, 'import logging\n'), ((1650, 1704), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Get folder path"""'}), "(description='Get folder path')\n", (1673, 1704), False, 'import argparse\n'), ((1423, 1453), 'csv.writer', 'csv.writer', (['lf'], {'delimiter': '"""\t"""'}), "(lf, delimiter='\\t')\n", (1433, 1453), False, 'import csv\n'), ((1890, 1915), 'pathlib.Path', 'pathlib.Path', (['args.folder'], {}), '(args.folder)\n', (1902, 1915), False, 'import pathlib\n'), ((920, 949), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""'}), "(f, delimiter='\\t')\n", (930, 949), False, 'import csv\n'), ((1770, 1788), 'pathlib.Path.cwd', 'pathlib.Path.cwd', ([], {}), '()\n', (1786, 1788), False, 'import pathlib\n')] |
from __future__ import annotations
import typing
import rich
import toolcli
from toolcli import spec
def get_command_spec() -> toolcli.CommandSpec:
return {
'f': theme_command,
'help': 'display cli style theme',
'hidden': True,
'extra_data': ['parse_spec'],
}
def theme_command(parse_spec: spec.ParseSpec) -> None:
config = parse_spec.get('config')
if config is None:
print('no theme')
else:
theme = config.get('style_theme')
if theme is None:
print('no theme')
else:
longest = max(len(key) for key in theme.keys())
longest = longest + 1
for key, value in theme.items():
value_str = typing.cast(str, value)
rich.print(
(key + ':').ljust(longest)
+ ' '
+ '['
+ value_str
+ ']'
+ value_str
+ '[/'
+ value_str
+ ']',
)
| [
"typing.cast"
] | [((738, 761), 'typing.cast', 'typing.cast', (['str', 'value'], {}), '(str, value)\n', (749, 761), False, 'import typing\n')] |
#!/usr/bin/env python
"""
Functions for running dyPolyChord using compiled PolyChord C++ or Fortran
likelihoods.
"""
import os
class RunCompiledPolyChord(object):
"""Object for running a compiled PolyChord executable with specified
inputs."""
def __init__(self, executable_path, prior_str, **kwargs):
"""
Specify path to executable, priors and derived parameters.
Parameters
----------
executable_path: str
Path to compiled likelihood. If this is in the directory from which
dyPolyChord is being run, you may need to prepend "./" to the
executable name for it to work.
prior_str: str
String specifying prior in the format required for PolyChord .ini
files (see get_prior_block_str for more details).
config_str: str, optional
String to be written to [root].cfg file if required.
derived_str: str or None, optional
String specifying prior in the format required for PolyChord .ini
files (see prior_str for more details).
mpi_str: str or None, optional
Optional mpi command to preprend to run command.
For example to run with 8 processors, use mpi_str = 'mprun -np 8'.
Note that PolyChord must be installed with MPI enabled to allow
running with MPI.
"""
self.config_str = kwargs.pop('config_str', None)
self.derived_str = kwargs.pop('derived_str', None)
self.mpi_str = kwargs.pop('mpi_str', None)
if kwargs:
raise TypeError('unexpected **kwargs: {0}'.format(kwargs))
self.executable_path = executable_path
self.prior_str = prior_str
def __call__(self, settings_dict, comm=None):
"""
Run PolyChord with the input settings by writing a .ini file then using
the compiled likelihood specified in executable_path.
See the PolyChord documentation for more details.
Parameters
----------
settings_dict: dict
Input PolyChord settings.
comm: None, optional
Not used. Included only so __call__ has the same arguments as the
equivalent python function (which uses the comm argument for
            running with MPI).
"""
assert os.path.isfile(self.executable_path), (
'executable not found: ' + self.executable_path)
assert comm is None, 'comm not used for compiled likelihoods.'
# Write settings to ini file
file_path = os.path.join(
settings_dict['base_dir'], settings_dict['file_root'])
with open(file_path + '.ini', 'w') as ini_file:
ini_file.write(self.ini_string(settings_dict))
# If required, write config file
if self.config_str is not None:
with open(file_path + '.cfg', 'w') as cfg_file:
cfg_file.write(self.config_str)
# Execute command
command_str = self.executable_path + ' ' + file_path + '.ini'
if self.mpi_str is not None:
command_str = self.mpi_str + ' ' + command_str
os.system(command_str)
def ini_string(self, settings):
"""Get a PolyChord format .ini file string based on the input settings.
Parameters
----------
settings: dict
Returns
-------
string: str
"""
string = ''
# Add the settings
for key, value in settings.items():
if key == 'nlives':
if value:
loglikes = sorted(settings['nlives'])
string += 'loglikes = ' + format_setting(loglikes) + '\n'
nlives = [settings['nlives'][ll] for ll in loglikes]
string += 'nlives = ' + format_setting(nlives) + '\n'
else:
string += key + ' = ' + format_setting(value) + '\n'
# Add the prior
string += self.prior_str
if self.derived_str is not None:
string += self.derived_str
return string
# Helper functions for making PolyChord prior strings
# ---------------------------------------------------
def get_prior_block_str(prior_name, prior_params, nparam, **kwargs):
"""
Returns a PolyChord format prior block for inclusion in PolyChord .ini
files.
See the PolyChord documentation for more details.
Parameters
----------
prior_name: str
Name of prior. See the PolyChord documentation for a list of currently
available priors and details of how to add your own.
prior_params: str, float or list of strs and floats
Parameters for the prior function.
nparam: int
Number of parameters.
start_param: int, optional
Where to start param numbering. For when multiple prior blocks are
being used.
block: int, optional
Number of block (only needed when using multiple prior blocks).
speed: int, optional
Use to specify fast and slow parameters if required. See the PolyChord
documentation for more details.
Returns
-------
block_str: str
PolyChord format prior block string for ini file.
"""
start_param = kwargs.pop('start_param', 1)
speed = kwargs.pop('speed', 1)
block = kwargs.pop('block', 1)
if kwargs:
raise TypeError('unexpected **kwargs: {0}'.format(kwargs))
block_str = ''
for i in range(start_param, nparam + start_param):
block_str += ('P : p{0} | \\theta_{{{0}}} | {1} | {2} | {3} |'
.format(i, speed, prior_name, block))
block_str += format_setting(prior_params) + '\n'
return block_str
def format_setting(setting):
"""
Return setting as string in the format needed for PolyChord's .ini files.
These use 'T' for True and 'F' for False, and require lists of numbers
written separated by spaces and without commas or brackets.
Parameters
----------
setting: (can be any type for which str(settings) works)
Returns
-------
str
"""
if isinstance(setting, bool):
return str(setting)[0]
elif isinstance(setting, (list, tuple)):
string = str(setting)
for char in [',', '[', ']', '(', ')']:
string = string.replace(char, '')
return string
else:
return str(setting)
def python_prior_to_str(prior, **kwargs):
"""Utility function for mapping python priors (of the type in
python_priors.py) to ini file format strings used for compiled (C++
or Fortran) likelihoods.
The input prior must correspond to a prior function set up in
PolyChord/src/polychord/priors.f90. You can easily add your own too.
Note that some of the priors are only available in PolyChord >= v1.15.
Parameters
----------
    prior: python prior object
Of the type defined in python_priors.py
kwargs: dict, optional
Passed to get_prior_block_str (see its docstring for more details).
Returns
-------
block_str: str
PolyChord format prior block string for ini file.
"""
nparam = kwargs.pop('nparam')
name = type(prior).__name__.lower()
if name == 'uniform':
parameters = [prior.minimum, prior.maximum]
elif name == 'poweruniform':
name = 'power_uniform'
parameters = [prior.minimum, prior.maximum, prior.power]
assert prior.power < 0, (
'compiled power_uniform currently only takes negative powers.'
'power={}'.format(prior.power))
elif name == 'gaussian':
parameters = [getattr(prior, 'mu', 0.0), prior.sigma]
if getattr(prior, 'half', False):
name = 'half_' + name
elif name == 'exponential':
parameters = [prior.lambd]
else:
raise TypeError('Not set up for ' + name)
if getattr(prior, 'sort', False):
name = 'sorted_' + name
if getattr(prior, 'adaptive', False):
name = 'adaptive_' + name
assert getattr(prior, "nfunc_min", 1) == 1, (
'compiled adaptive priors currently only take nfunc_min=1.'
'prior.nfunc_min={}'.format(prior.nfunc_min))
return get_prior_block_str(name, parameters, nparam, **kwargs)
def python_block_prior_to_str(bp_obj):
"""As for python_prior_to_str, but for BlockPrior objects of the type
    defined in python_priors.py. python_prior_to_str is called separately on
every block.
Parameters
----------
    bp_obj: python prior object
        Of the type defined in python_priors.py.
Returns
-------
block_str: str
PolyChord format prior block string for ini file.
"""
assert type(bp_obj).__name__ == 'BlockPrior', (
'Unexpected input object type: {}'.format(
type(bp_obj).__name__))
start_param = 1
string = ''
for i, prior in enumerate(bp_obj.prior_blocks):
string += python_prior_to_str(
prior, block=(i + 1), start_param=start_param,
nparam=bp_obj.block_sizes[i])
start_param += bp_obj.block_sizes[i]
return string
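if __name__ == '__main__':
    # Illustrative sketch: show the .ini fragments produced by the helpers above.
    print(format_setting(True))         # -> T
    print(format_setting([1, 2.5]))     # -> 1 2.5
    print(get_prior_block_str('uniform', [0.0, 1.0], 2))
    # -> P : p1 | \theta_{1} | 1 | uniform | 1 |0.0 1.0
    #    P : p2 | \theta_{2} | 1 | uniform | 1 |0.0 1.0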
| [
"os.path.isfile",
"os.system",
"os.path.join"
] | [((2340, 2376), 'os.path.isfile', 'os.path.isfile', (['self.executable_path'], {}), '(self.executable_path)\n', (2354, 2376), False, 'import os\n'), ((2569, 2636), 'os.path.join', 'os.path.join', (["settings_dict['base_dir']", "settings_dict['file_root']"], {}), "(settings_dict['base_dir'], settings_dict['file_root'])\n", (2581, 2636), False, 'import os\n'), ((3154, 3176), 'os.system', 'os.system', (['command_str'], {}), '(command_str)\n', (3163, 3176), False, 'import os\n')] |
from django.contrib import admin
from . import models
class NotesAdmin(admin.ModelAdmin):
list_display = ('title',)
admin.site.register(models.Notes, NotesAdmin) | [
"django.contrib.admin.site.register"
] | [((123, 168), 'django.contrib.admin.site.register', 'admin.site.register', (['models.Notes', 'NotesAdmin'], {}), '(models.Notes, NotesAdmin)\n', (142, 168), False, 'from django.contrib import admin\n')] |
import pyaf.tests.periodicities.period_test as per
per.buildModel((5 , 'D' , 100));
| [
"pyaf.tests.periodicities.period_test.buildModel"
] | [((52, 81), 'pyaf.tests.periodicities.period_test.buildModel', 'per.buildModel', (["(5, 'D', 100)"], {}), "((5, 'D', 100))\n", (66, 81), True, 'import pyaf.tests.periodicities.period_test as per\n')] |
import pathlib
import sys
NB_ROOT = pathlib.Path(__file__).parent
PYNSTEIN_ROOT = pathlib.Path(__file__).parent.parent.parent.parent / 'utilities' / 'pystein'
GR2_ROOT = NB_ROOT.parent
def setup_nb():
sys.path.append(PYNSTEIN_ROOT.as_posix())
sys.path.append(GR2_ROOT.as_posix())
| [
"pathlib.Path"
] | [((37, 59), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (49, 59), False, 'import pathlib\n'), ((83, 105), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (95, 105), False, 'import pathlib\n')] |
def _valueOr(dictionary, key, default):
return dictionary[key] if key in dictionary else default
def _operationNetValue(operation):
return operation['price'] * _valueOr(operation, 'currencyConversion', 1.0)
def _printJson(data):
from bson.json_util import dumps
print(dumps(data, indent=2))
from .operations import Operations
from .profits import Profits
from .period import Period
from .categories import Categories
| [
"bson.json_util.dumps"
] | [((286, 307), 'bson.json_util.dumps', 'dumps', (['data'], {'indent': '(2)'}), '(data, indent=2)\n', (291, 307), False, 'from bson.json_util import dumps\n')] |
import unittest
from test_gpeg import TestGPEG
from test_tpeg import TestTPEG
def suite():
suite = unittest.TestSuite()
suite.addTests(unittest.makeSuite(TestGPEG))
suite.addTests(unittest.makeSuite(TestTPEG))
return suite
| [
"unittest.TestSuite",
"unittest.makeSuite"
] | [((102, 122), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (120, 122), False, 'import unittest\n'), ((140, 168), 'unittest.makeSuite', 'unittest.makeSuite', (['TestGPEG'], {}), '(TestGPEG)\n', (158, 168), False, 'import unittest\n'), ((187, 215), 'unittest.makeSuite', 'unittest.makeSuite', (['TestTPEG'], {}), '(TestTPEG)\n', (205, 215), False, 'import unittest\n')] |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import os
import random
import torch.utils.data
from loguru import logger
import torch
import cv2
import numpy as np
import json
import glob
import hashlib
#from ..dataloading import get_yolox_datadir
#from .datasets_wrapper import Dataset
from yolox.data.datasets.datasets_wrapper import Dataset
from DOTA_devkit import dota_utils
from yolox.data.data_augment import OrientedValTransform, OrientedTrainTransform
from pathlib import Path
from tqdm import tqdm
IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes
dotav10_classes = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle',
'large-vehicle', 'ship', 'tennis-court', 'basketball-court', 'storage-tank',
'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool', 'helicopter']
dotav15_classes = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle',
'large-vehicle', 'ship', 'tennis-court', 'basketball-court', 'storage-tank',
'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool', 'helicopter',
'container-crane']
def collate_fn(batch):
img, label, img_info, img_id = zip(*batch)
for i, l in enumerate(label):
l[:, 0] = i
return torch.from_numpy(np.array(img)), torch.from_numpy(np.vstack(label)), np.vstack(img_info), img_id
def draw(img, label, savepath=False, windowName='image'):
pts = label[:, 1: -1]
for i, poly in enumerate(pts):
poly = poly.reshape([4, 2]).astype(np.int32)
cv2.polylines(img, [poly], isClosed=True, color=(0, 0, 255), thickness=2)
if savepath:
cv2.imwrite(savepath, img)
else:
cv2.namedWindow(windowName, 0)
cv2.imshow(windowName, img)
cv2.waitKey()
def drawOneImg(img, label, savepath=False, windowName='image'):
pts = label[:, 1: -1]
for i, poly in enumerate(pts):
poly = poly.reshape([4, 2]).astype(np.int32)
cv2.polylines(img, [poly], isClosed=True, color=(0, 0, 255), thickness=2)
if savepath:
cv2.imwrite(savepath, img)
else:
cv2.namedWindow("img", 0)
cv2.imshow("img", img)
cv2.waitKey()
class DOTADataset(Dataset):
def __init__(self, name="train", data_dir=None, img_size=(1024, 1024), preproc=None, cache=False, save_result_dir=None):
super().__init__(img_size)
self.imgs = None
self.name = name
self.data_dir = data_dir
self.img_size = img_size
self.labels_dir = os.path.join(data_dir, name, 'labelTxt')
self.imgs_dir = os.path.join(data_dir, name, 'images')
self.preproc = preproc
self.save_result_dir = save_result_dir
self.labels_file = [files for root, dirs, files in os.walk(self.labels_dir)]
self.labels_file = [os.path.join(self.labels_dir, file) for file in self.labels_file[0]]
self.imgs_file = [file.replace('labelTxt', 'images').replace('.txt', '.png') for file in self.labels_file]
assert len(self.labels_file) == len(self.imgs_file)
self.imgs_num = len(self.imgs_file)
self.class_id = {}
for i, cls in enumerate(dotav15_classes):
self.class_id[cls] = i
self.ids = [i for i in range(len(self.labels_file))]
random.shuffle(self.ids)
if cache :
self._cache_images()
def __len__(self):
return self.imgs_num
def load_image(self, index):
return cv2.imread(self.imgs_file[index])
def load_resized_img(self, index):
img = self.load_image(index)
r = min(self.img_size[0] / img.shape[0], self.img_size[1] / img.shape[1])
resized_img = cv2.resize(
img,
(int(img.shape[1] * r), int(img.shape[0] * r)),
interpolation=cv2.INTER_LINEAR,
).astype(np.uint8)
return resized_img
def load_anno(self, index):
ann_file = self.labels_file[index]
objects = dota_utils.parse_dota_poly2(ann_file)
targets = []
for obj in objects:
class_id = self.class_id[obj['name']]
poly = obj['poly']
targets.append([0] + poly + [class_id])
return np.array([targets])
def pull_item(self, index):
id_ = self.ids[index]
img = self.load_image(index)
height, width = img.shape[0], img.shape[1]
ann_file = self.labels_file[index]
objects = dota_utils.parse_dota_poly2(ann_file)
targets = []
for obj in objects:
class_id = self.class_id[obj['name']]
poly = obj['poly']
targets.append([0] + poly + [class_id])
res = np.array(targets)
img_info = np.array([height, width])
#self.draw(img, res)
return img, res.copy(), img_info, np.array([id_])
def __getitem__(self, index):
img, target, img_info, img_id = self.pull_item(index)
save_path = f"/home/yanggang/exprimentsForDataRead/{index}_draw.png"
if self.preproc is not None:
img, target = self.preproc(img, target, self.input_dim, savepath=save_path)
return img, target, img_info, img_id
def letterbox(self, img, new_shape=(1024,1024), color=(114,114,114),auto=False, scaleFill=False, scale_up=True, stride=32):
shape = img.shape[:2]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scale_up:
r = min(r, 1.0)
radio = r, r
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]
if auto:
dh, dw = np.mod(dh, stride), np.mod(dw, stride)
elif scaleFill:
dh, dw = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
radio = new_shape[1]/shape[1], new_shape[0]/shape[0]
dw /= 2
dh /= 2
if shape[::-1] != new_unpad:
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
im = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
return im, radio, (dw, dh)
if __name__ == '__main__':
dataset = DOTADataset(name='train', data_dir='/home/yanggang/data/DOTA_SPLIT', preproc=OrientedTrainTransform())
dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=4, shuffle=False,collate_fn=collate_fn)
for i, (img, target, img_info, img_id) in enumerate(dataloader):
labels = target[:, -1]
gt_classes = torch.nn.functional.one_hot(labels.to(torch.int64), 16)
print(f"reading {i} batch, {img.shape}")
| [
"cv2.imwrite",
"DOTA_devkit.dota_utils.parse_dota_poly2",
"random.shuffle",
"cv2.polylines",
"cv2.copyMakeBorder",
"os.path.join",
"yolox.data.data_augment.OrientedTrainTransform",
"cv2.imshow",
"numpy.array",
"numpy.mod",
"numpy.vstack",
"torch.utils.data.DataLoader",
"cv2.resize",
"cv2.waitKey",
"cv2.namedWindow",
"os.walk",
"cv2.imread"
] | [((6665, 6765), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_size': '(4)', 'shuffle': '(False)', 'collate_fn': 'collate_fn'}), '(dataset=dataset, batch_size=4, shuffle=False,\n collate_fn=collate_fn)\n', (6692, 6765), False, 'import torch\n'), ((1476, 1495), 'numpy.vstack', 'np.vstack', (['img_info'], {}), '(img_info)\n', (1485, 1495), True, 'import numpy as np\n'), ((1685, 1758), 'cv2.polylines', 'cv2.polylines', (['img', '[poly]'], {'isClosed': '(True)', 'color': '(0, 0, 255)', 'thickness': '(2)'}), '(img, [poly], isClosed=True, color=(0, 0, 255), thickness=2)\n', (1698, 1758), False, 'import cv2\n'), ((1785, 1811), 'cv2.imwrite', 'cv2.imwrite', (['savepath', 'img'], {}), '(savepath, img)\n', (1796, 1811), False, 'import cv2\n'), ((1830, 1860), 'cv2.namedWindow', 'cv2.namedWindow', (['windowName', '(0)'], {}), '(windowName, 0)\n', (1845, 1860), False, 'import cv2\n'), ((1870, 1897), 'cv2.imshow', 'cv2.imshow', (['windowName', 'img'], {}), '(windowName, img)\n', (1880, 1897), False, 'import cv2\n'), ((1906, 1919), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1917, 1919), False, 'import cv2\n'), ((2107, 2180), 'cv2.polylines', 'cv2.polylines', (['img', '[poly]'], {'isClosed': '(True)', 'color': '(0, 0, 255)', 'thickness': '(2)'}), '(img, [poly], isClosed=True, color=(0, 0, 255), thickness=2)\n', (2120, 2180), False, 'import cv2\n'), ((2207, 2233), 'cv2.imwrite', 'cv2.imwrite', (['savepath', 'img'], {}), '(savepath, img)\n', (2218, 2233), False, 'import cv2\n'), ((2252, 2277), 'cv2.namedWindow', 'cv2.namedWindow', (['"""img"""', '(0)'], {}), "('img', 0)\n", (2267, 2277), False, 'import cv2\n'), ((2287, 2309), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (2297, 2309), False, 'import cv2\n'), ((2318, 2331), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (2329, 2331), False, 'import cv2\n'), ((2663, 2703), 'os.path.join', 'os.path.join', (['data_dir', 'name', '"""labelTxt"""'], {}), "(data_dir, name, 'labelTxt')\n", (2675, 2703), False, 'import os\n'), ((2728, 2766), 'os.path.join', 'os.path.join', (['data_dir', 'name', '"""images"""'], {}), "(data_dir, name, 'images')\n", (2740, 2766), False, 'import os\n'), ((3431, 3455), 'random.shuffle', 'random.shuffle', (['self.ids'], {}), '(self.ids)\n', (3445, 3455), False, 'import random\n'), ((3611, 3644), 'cv2.imread', 'cv2.imread', (['self.imgs_file[index]'], {}), '(self.imgs_file[index])\n', (3621, 3644), False, 'import cv2\n'), ((4107, 4144), 'DOTA_devkit.dota_utils.parse_dota_poly2', 'dota_utils.parse_dota_poly2', (['ann_file'], {}), '(ann_file)\n', (4134, 4144), False, 'from DOTA_devkit import dota_utils\n'), ((4346, 4365), 'numpy.array', 'np.array', (['[targets]'], {}), '([targets])\n', (4354, 4365), True, 'import numpy as np\n'), ((4579, 4616), 'DOTA_devkit.dota_utils.parse_dota_poly2', 'dota_utils.parse_dota_poly2', (['ann_file'], {}), '(ann_file)\n', (4606, 4616), False, 'from DOTA_devkit import dota_utils\n'), ((4817, 4834), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (4825, 4834), True, 'import numpy as np\n'), ((4854, 4879), 'numpy.array', 'np.array', (['[height, width]'], {}), '([height, width])\n', (4862, 4879), True, 'import numpy as np\n'), ((6384, 6471), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['img', 'top', 'bottom', 'left', 'right', 'cv2.BORDER_CONSTANT'], {'value': 'color'}), '(img, top, bottom, left, right, cv2.BORDER_CONSTANT,\n value=color)\n', (6402, 6471), False, 'import cv2\n'), ((1424, 1437), 'numpy.array', 
'np.array', (['img'], {}), '(img)\n', (1432, 1437), True, 'import numpy as np\n'), ((1457, 1473), 'numpy.vstack', 'np.vstack', (['label'], {}), '(label)\n', (1466, 1473), True, 'import numpy as np\n'), ((2958, 2993), 'os.path.join', 'os.path.join', (['self.labels_dir', 'file'], {}), '(self.labels_dir, file)\n', (2970, 2993), False, 'import os\n'), ((4952, 4967), 'numpy.array', 'np.array', (['[id_]'], {}), '([id_])\n', (4960, 4967), True, 'import numpy as np\n'), ((6181, 6239), 'cv2.resize', 'cv2.resize', (['img', 'new_unpad'], {'interpolation': 'cv2.INTER_LINEAR'}), '(img, new_unpad, interpolation=cv2.INTER_LINEAR)\n', (6191, 6239), False, 'import cv2\n'), ((6622, 6646), 'yolox.data.data_augment.OrientedTrainTransform', 'OrientedTrainTransform', ([], {}), '()\n', (6644, 6646), False, 'from yolox.data.data_augment import OrientedValTransform, OrientedTrainTransform\n'), ((2904, 2928), 'os.walk', 'os.walk', (['self.labels_dir'], {}), '(self.labels_dir)\n', (2911, 2928), False, 'import os\n'), ((5882, 5900), 'numpy.mod', 'np.mod', (['dh', 'stride'], {}), '(dh, stride)\n', (5888, 5900), True, 'import numpy as np\n'), ((5902, 5920), 'numpy.mod', 'np.mod', (['dw', 'stride'], {}), '(dw, stride)\n', (5908, 5920), True, 'import numpy as np\n')] |
from django.contrib.auth.forms import UserCreationForm
from django.http import HttpResponse
from django.views.generic import CreateView
from django.contrib.auth.models import User
from django.core import serializers
from rest_framework.views import APIView
from user.forms import FormRegister
from user.serializers import UserSerializer
from django.urls import reverse_lazy
import json
def userListed(request):
list = serializers.serialize('json', User.objects.all(), fields=['username', 'first_name'])
return HttpResponse(list, content_type='application/json')
class UserRegister(CreateView):
model = User
template_name = 'user/register.html'
form_class = FormRegister
success_url = reverse_lazy('pet:pet_list')
#print(form_class.errors)
class UserAPI(APIView):
serializer = UserSerializer
def get(self, request, format=None):
list = User.objects.all()
response = self.serializer(list, many=True)
return HttpResponse(json.dumps(response.data), content_type='application/json') | [
"django.http.HttpResponse",
"django.contrib.auth.models.User.objects.all",
"json.dumps",
"django.urls.reverse_lazy"
] | [((519, 570), 'django.http.HttpResponse', 'HttpResponse', (['list'], {'content_type': '"""application/json"""'}), "(list, content_type='application/json')\n", (531, 570), False, 'from django.http import HttpResponse\n'), ((710, 738), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""pet:pet_list"""'], {}), "('pet:pet_list')\n", (722, 738), False, 'from django.urls import reverse_lazy\n'), ((453, 471), 'django.contrib.auth.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (469, 471), False, 'from django.contrib.auth.models import User\n'), ((887, 905), 'django.contrib.auth.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (903, 905), False, 'from django.contrib.auth.models import User\n'), ((986, 1011), 'json.dumps', 'json.dumps', (['response.data'], {}), '(response.data)\n', (996, 1011), False, 'import json\n')] |
from pathlib import Path
from matplotlib import pyplot
from apppath import ensure_existence
from draugr import PROJECT_APP_PATH
from draugr.tensorboard_utilities import TensorboardEventExporter
from draugr.writers import TrainingScalars
if __name__ == "__main__":
save = False
event_files = list(PROJECT_APP_PATH.user_log.rglob("events.out.tfevents.*"))
if len(event_files) > 0:
for _path_to_events_file in event_files:
print(f"Event file: {_path_to_events_file}")
_out_dir = Path.cwd() / "exclude" / "results"
ensure_existence(_out_dir)
tee = TensorboardEventExporter(
_path_to_events_file.parent, save_to_disk=save
)
print(f"Available tags: {tee.tags_available}")
tee.export_line_plot(TrainingScalars.training_loss.value, out_dir=_out_dir)
if not save:
pyplot.show()
else:
print("No events found")
| [
"draugr.PROJECT_APP_PATH.user_log.rglob",
"apppath.ensure_existence",
"pathlib.Path.cwd",
"draugr.tensorboard_utilities.TensorboardEventExporter",
"matplotlib.pyplot.show"
] | [((307, 363), 'draugr.PROJECT_APP_PATH.user_log.rglob', 'PROJECT_APP_PATH.user_log.rglob', (['"""events.out.tfevents.*"""'], {}), "('events.out.tfevents.*')\n", (338, 363), False, 'from draugr import PROJECT_APP_PATH\n'), ((570, 596), 'apppath.ensure_existence', 'ensure_existence', (['_out_dir'], {}), '(_out_dir)\n', (586, 596), False, 'from apppath import ensure_existence\n'), ((615, 687), 'draugr.tensorboard_utilities.TensorboardEventExporter', 'TensorboardEventExporter', (['_path_to_events_file.parent'], {'save_to_disk': 'save'}), '(_path_to_events_file.parent, save_to_disk=save)\n', (639, 687), False, 'from draugr.tensorboard_utilities import TensorboardEventExporter\n'), ((906, 919), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (917, 919), False, 'from matplotlib import pyplot\n'), ((523, 533), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (531, 533), False, 'from pathlib import Path\n')] |
from __future__ import print_function, division
import sys
import numpy as np
import csv
import os
import glob
import plotly
import plotly.graph_objs as go
def parse_stats(statsfn):
stats = {
'tp': [],
'fp': [],
'fn': [],
'num_input_sv': [],
'num_bp_away_from_sv_and_cents_and_telos': [],
'num_bp_away_from_cents_and_telos': [],
'numbp': [],
'num_sv_away_from_cents_and_telos': [],
'datasets': [],
}
labels = {}
with open(statsfn) as F:
reader = csv.DictReader(F, delimiter='\t')
for row in reader:
stats['tp'].append(float(row['num_bp_replaced_by_sv']))
stats['fp'].append(float(row['num_non_sv']))
stats['fn'].append(float(row['num_lone_sv']))
stats['num_sv_away_from_cents_and_telos'].append(float(row['num_bp_replaced_by_sv']) + float(row['num_lone_sv']))
stats['datasets'].append(row['dataset'])
for K in ('tp', 'fp', 'fn', 'num_input_sv', 'num_bp_away_from_sv_and_cents_and_telos', 'num_bp_away_from_cents_and_telos', 'num_sv_away_from_cents_and_telos'):
stats[K] = np.array(stats[K], dtype=np.float)
stats['numbp'] = stats['tp'] + stats['fp']
labels['numbp'] = stats['datasets']
stats['precision'] = stats['tp'] / (stats['tp'] + stats['fp'])
stats['recall'] = stats['tp'] / (stats['tp'] + stats['fn'])
labels['precision'] = labels['recall'] = stats['datasets']
oldlen = len(stats['precision'])
assert oldlen == len(stats['recall'])
notnan_idxs = np.logical_not(np.logical_or(np.isnan(stats['precision']), np.isnan(stats['recall'])))
stats['precision'] = stats['precision'][notnan_idxs]
stats['recall'] = stats['recall'][notnan_idxs]
assert len(stats['precision']) == len(stats['recall'])
#print(statsfn, 'has', oldlen - len(stats['precision']), 'nan')
stats['nonsv_ratio'] = (stats['fp'] + 1) / (stats['num_sv_away_from_cents_and_telos'] + 1)
assert np.count_nonzero(np.isnan(stats['nonsv_ratio'])) == 0
assert np.count_nonzero(stats['nonsv_ratio'] == 0) == 0
stats['nonsv_ratio'] = np.log2(stats['nonsv_ratio'])
labels['nonsv_ratio'] = ['%s (nonsv = %s, sv = %s)' % (stats['datasets'][idx], stats['fp'][idx], stats['num_sv_away_from_cents_and_telos'][idx]) for idx in range(len(stats['datasets']))]
return (stats, labels)
def scatter(traces, title, xtitle, ytitle, outfn, logx = False, xmin = None, xmax = None,):
xaxis = {
'title': xtitle,
'type': logx and 'log' or 'linear',
'range': [xmin, xmax],
}
layout = go.Layout(
title = title,
hovermode = 'closest',
xaxis = xaxis,
yaxis = {
'title': ytitle,
},
)
fig = go.Figure(data=traces, layout=layout)
plotly.offline.plot(fig, filename=outfn)
def cdf(arr, labels=None):
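  # Empirical CDF: values sorted ascending, paired with cumulative fractions in [0, 1).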
sorted_idxs = np.argsort(arr)
ret = [
arr[sorted_idxs],
np.linspace(0, 1, len(arr), endpoint=False),
]
if labels is not None:
ret.append([labels[idx] for idx in sorted_idxs])
return tuple(ret)
def plot_method_combos(statsfns):
xvals, yvals, xerrors, yerrors = [], [], [], []
runs = []
for statsfn in statsfns:
run = os.path.basename(statsfn).split('.')[1]
runs.append(run)
stats, _ = parse_stats(statsfn)
xvals.append(np.mean(stats['recall']))
yvals.append(np.mean(stats['precision']))
xerrors.append(np.std(stats['recall']))
yerrors.append(np.std(stats['precision']))
show_error_bars = False
method_combo_trace = go.Scatter(
mode = 'markers',
x = xvals,
y = yvals,
text = runs,
error_x = {
'type': 'data',
'array': xerrors,
'visible': show_error_bars,
},
error_y = {
'type': 'data',
'array': yerrors,
'visible': show_error_bars,
},
)
scatter(
[method_combo_trace],
'Performance of different combinations of (broad, dkfz, jabba, mustonen095, peifer, vanloo_wedge)',
'Recall',
'Precision',
'method_combos_perf.html',
)
def plot_ecdfs(run_label, statsfns):
traces = {
'precision': [],
'recall': [],
'numbp': [],
'nonsv_ratio': [],
}
vals = {
'precision': {},
'recall': {},
'numbp': {},
'nonsv_ratio': {},
}
for statsfn in statsfns:
run = os.path.basename(statsfn).split('.')[1]
print(statsfn, run)
stats, labels = parse_stats(statsfn)
for plot in traces.keys():
# Record vals so that we can examine them in IPython if we wish.
vals[plot][run] = stats[plot]
X, Y, L = cdf(stats[plot], labels[plot])
if True or run_label == 'consensus_methods':
line = {'width': 4}
else:
line = {'dash': 'dot', 'width': 4}
traces[plot].append(go.Scatter(
mode='lines',
x = X,
y = Y,
text = L,
name = '%s (%s values)' % (run, len(L)),
line = line,
# Comma corresponds to "Y_dkfz,jabba", which uses both (along with SVs).
#visible = (('any' in run or ',' in run) and True or 'legendonly'),
))
#for T in traces['numbp']:
# if T['name'].startswith('any'):
# T['visible'] = False
scatter(
traces['precision'],
'Precision ECDF',
'Precision',
'ECDF(x)',
'precision_ecdf.%s.html' % run_label,
)
scatter(
traces['recall'],
'Recall ECDF',
'Recall',
'ECDF(x)',
'recall_ecdf.%s.html' % run_label,
)
scatter(
traces['numbp'],
'# BPs ECDF',
'# BPs',
'ECDF(x)',
'numbp_ecdf.%s.html' % run_label,
logx = True,
#xmin = 1.9,
#xmax = 4,
)
scatter(
traces['nonsv_ratio'],
'Non-SV ratio ECDF',
'log2((# non-SV BPs + 1) / (# SVs + 1))',
'ECDF(x)',
'nonsv_ratio_ecdf.%s.html' % run_label,
xmin = -10,
xmax = 10,
)
def main():
run_label = sys.argv[1]
assert run_label in ('consensus_methods', 'indiv_methods')
plot_method_combos(sys.argv[2:])
plot_ecdfs(run_label, sys.argv[2:])
main()
| [
"numpy.mean",
"csv.DictReader",
"plotly.offline.plot",
"numpy.std",
"plotly.graph_objs.Scatter",
"numpy.argsort",
"numpy.array",
"numpy.count_nonzero",
"numpy.isnan",
"os.path.basename",
"plotly.graph_objs.Layout",
"plotly.graph_objs.Figure",
"numpy.log2"
] | [((2018, 2047), 'numpy.log2', 'np.log2', (["stats['nonsv_ratio']"], {}), "(stats['nonsv_ratio'])\n", (2025, 2047), True, 'import numpy as np\n'), ((2472, 2557), 'plotly.graph_objs.Layout', 'go.Layout', ([], {'title': 'title', 'hovermode': '"""closest"""', 'xaxis': 'xaxis', 'yaxis': "{'title': ytitle}"}), "(title=title, hovermode='closest', xaxis=xaxis, yaxis={'title':\n ytitle})\n", (2481, 2557), True, 'import plotly.graph_objs as go\n'), ((2604, 2641), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'data': 'traces', 'layout': 'layout'}), '(data=traces, layout=layout)\n', (2613, 2641), True, 'import plotly.graph_objs as go\n'), ((2644, 2684), 'plotly.offline.plot', 'plotly.offline.plot', (['fig'], {'filename': 'outfn'}), '(fig, filename=outfn)\n', (2663, 2684), False, 'import plotly\n'), ((2729, 2744), 'numpy.argsort', 'np.argsort', (['arr'], {}), '(arr)\n', (2739, 2744), True, 'import numpy as np\n'), ((3391, 3598), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'mode': '"""markers"""', 'x': 'xvals', 'y': 'yvals', 'text': 'runs', 'error_x': "{'type': 'data', 'array': xerrors, 'visible': show_error_bars}", 'error_y': "{'type': 'data', 'array': yerrors, 'visible': show_error_bars}"}), "(mode='markers', x=xvals, y=yvals, text=runs, error_x={'type':\n 'data', 'array': xerrors, 'visible': show_error_bars}, error_y={'type':\n 'data', 'array': yerrors, 'visible': show_error_bars})\n", (3401, 3598), True, 'import plotly.graph_objs as go\n'), ((497, 530), 'csv.DictReader', 'csv.DictReader', (['F'], {'delimiter': '"""\t"""'}), "(F, delimiter='\\t')\n", (511, 530), False, 'import csv\n'), ((1064, 1098), 'numpy.array', 'np.array', (['stats[K]'], {'dtype': 'np.float'}), '(stats[K], dtype=np.float)\n', (1072, 1098), True, 'import numpy as np\n'), ((1944, 1987), 'numpy.count_nonzero', 'np.count_nonzero', (["(stats['nonsv_ratio'] == 0)"], {}), "(stats['nonsv_ratio'] == 0)\n", (1960, 1987), True, 'import numpy as np\n'), ((1493, 1521), 'numpy.isnan', 'np.isnan', (["stats['precision']"], {}), "(stats['precision'])\n", (1501, 1521), True, 'import numpy as np\n'), ((1523, 1548), 'numpy.isnan', 'np.isnan', (["stats['recall']"], {}), "(stats['recall'])\n", (1531, 1548), True, 'import numpy as np\n'), ((1898, 1928), 'numpy.isnan', 'np.isnan', (["stats['nonsv_ratio']"], {}), "(stats['nonsv_ratio'])\n", (1906, 1928), True, 'import numpy as np\n'), ((3178, 3202), 'numpy.mean', 'np.mean', (["stats['recall']"], {}), "(stats['recall'])\n", (3185, 3202), True, 'import numpy as np\n'), ((3221, 3248), 'numpy.mean', 'np.mean', (["stats['precision']"], {}), "(stats['precision'])\n", (3228, 3248), True, 'import numpy as np\n'), ((3269, 3292), 'numpy.std', 'np.std', (["stats['recall']"], {}), "(stats['recall'])\n", (3275, 3292), True, 'import numpy as np\n'), ((3313, 3339), 'numpy.std', 'np.std', (["stats['precision']"], {}), "(stats['precision'])\n", (3319, 3339), True, 'import numpy as np\n'), ((3063, 3088), 'os.path.basename', 'os.path.basename', (['statsfn'], {}), '(statsfn)\n', (3079, 3088), False, 'import os\n'), ((4156, 4181), 'os.path.basename', 'os.path.basename', (['statsfn'], {}), '(statsfn)\n', (4172, 4181), False, 'import os\n')] |
"""The mypy command execution script. This is used by the deploying
job mainly.
Command example:
$ python run_mypy.py
"""
from logging import Logger
import command_util
from apply_lints_and_build_docs import MYPY_COMMAND
from apysc._console import loggers
logger: Logger = loggers.get_info_logger()
def _main() -> None:
"""
Run the mypy command.
Raises
------
Exception
If there are any mypy errors.
"""
logger.info('mypy command started.')
stdout: str = command_util.run_command(command=MYPY_COMMAND)
if 'Success' not in stdout:
raise Exception('There are mypy errors.')
if __name__ == '__main__':
_main()
| [
"command_util.run_command",
"apysc._console.loggers.get_info_logger"
] | [((290, 315), 'apysc._console.loggers.get_info_logger', 'loggers.get_info_logger', ([], {}), '()\n', (313, 315), False, 'from apysc._console import loggers\n'), ((528, 574), 'command_util.run_command', 'command_util.run_command', ([], {'command': 'MYPY_COMMAND'}), '(command=MYPY_COMMAND)\n', (552, 574), False, 'import command_util\n')] |
'''
@package: pyAudioLex
@author: <NAME>
@module: uh_freq
#uh = interjection frequency
'''
from nltk.tokenize import word_tokenize
from nltk.tag import pos_tag, map_tag
from collections import Counter
def uh_freq(importtext):
text=word_tokenize(importtext)
    tokens=pos_tag(text)
c=Counter(token for word, token in tokens)
    # interjection ('UH') count relative to the total number of tokens
    return c['UH']/len(text)
| [
"collections.Counter",
"nltk.tokenize.word_tokenize"
] | [((239, 264), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['importtext'], {}), '(importtext)\n', (252, 264), False, 'from nltk.tokenize import word_tokenize\n'), ((301, 341), 'collections.Counter', 'Counter', (['(token for word, token in tokens)'], {}), '(token for word, token in tokens)\n', (308, 341), False, 'from collections import Counter\n')] |
import argparse
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
import plotly.graph_objects as go
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'lib'))
import util
import resultserializer
def _calc_depth(parents):
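  # parents[i] is the parent of node i+1 (node 0 is the root); each child's depth is its parent's depth + 1.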
K = len(parents) + 1
depth = np.nan + np.zeros(K, dtype=np.int)
depth[0] = 0
stack = [0]
while len(stack) > 0:
node = stack.pop()
children = np.flatnonzero(parents == node) + 1
depth[children] = depth[node] + 1
stack += children.tolist()
assert not np.any(np.isnan(depth))
return depth[1:].astype(np.int)
def _calc_num_pops(parents):
# Calculate number of populations in each subclone.
adj = util.convert_parents_to_adjmatrix(parents)
K = len(adj)
assert adj.shape == (K,K)
anc = util.make_ancestral_from_adj(adj)
C = np.sum(anc, axis=1)
assert C[0] == K
return C[1:].astype(np.int)
def _calc_tree_stats(truth_fn):
truth = resultserializer.Results(truth_fn)
eta = truth.get('eta')
phi = truth.get('phi')
struct = truth.get('structure')
phi_std = np.std(phi, axis=1)
phi_mean = np.mean(phi, axis=1)
depth = _calc_depth(struct)
num_pops = _calc_num_pops(struct)
df = pd.DataFrame({
'phi_std': phi_std[1:],
'phi_mean': phi_mean[1:],
'largest_eta': np.max(eta, axis=1)[1:],
'depth': depth,
'num_pops': num_pops,
})
polyprimary = np.sum(struct == 0) > 1
return (df, polyprimary)
def _process(truth_fns):
tree_stats = pd.DataFrame()
polyprimary, monoprimary = 0, 0
for T in truth_fns:
ts, is_pp = _calc_tree_stats(T)
tree_stats = tree_stats.append(ts)
if is_pp:
polyprimary += 1
else:
monoprimary += 1
return (tree_stats, polyprimary, monoprimary)
def to_html(fig):
return pio.to_html(
fig,
full_html = False,
include_plotlyjs = 'cdn',
include_mathjax = 'cdn',
config = {
'showLink': True,
'toImageButtonOptions': {
'format': 'svg',
'width': 750,
'height': 450,
},
},
)
def count_num_pops(df):
counts = df['num_pops'].to_numpy()
assert 0 not in counts
vals = list(range(1, np.max(counts) + 1))
cnt = [np.sum(counts == val) for val in vals]
return (vals, cnt)
def make_box(df, x, y, title, xtitle, ytitle):
# Similar to px.box(), but with categorical axis that's properly ordered.
# px.box() seems to place categorical labels in random order.
rng = list(range(df[x].min(), df[x].max() + 1))
boxes = [go.Box(y=df[df[x] == N][y], name=N, boxmean=True, marker_color='#6773fa') for N in rng]
fig = go.Figure(
data=boxes,
layout={
'xaxis': {'title': xtitle, 'type': 'category'},
'yaxis': {'title': ytitle},
'title': title,
'template': 'plotly_white',
},
)
return fig
def plot_tree_stats(df, alpha, K):
figs = []
figs.append(make_box(
df,
x='num_pops',
y='phi_std',
title = f'φ standard deviation for {K} populations ({len(df):,} samples)',
xtitle='Number of populations in subclone',
ytitle='φ standard deviation',
))
figs.append(make_box(
df,
x='num_pops',
y='phi_mean',
title = f'Mean φ for {K} populations ({len(df):,} samples)',
xtitle='Number of populations in subclone',
ytitle='φ mean',
))
for F in figs:
F.update_layout(showlegend = False)
F.update_xaxes(type = 'category')
K += 1
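  # Analytical mean and std of the sum of C out of K symmetric Dirichlet(alpha) components,
  # i.e. a Beta(C*alpha, (K-C)*alpha) variable, overlaid on the box plots below.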
C = np.arange(1, K)
sum_var = np.sqrt( (C/K) * (1 - C/K) / (K*alpha + 1) )
sum_mean = C/K
analytical = pd.DataFrame({'C': C, 'sum_var': sum_var, 'sum_mean': sum_mean})
figs[0].add_scatter(x=analytical.C, y=analytical.sum_var, mode='markers', marker={'size': 22, 'opacity': 0.8, 'color': '#ff483b'})
figs[1].add_scatter(x=analytical.C, y=analytical.sum_mean, mode='markers', marker={'size': 22, 'opacity': 0.8, 'color': '#ff483b'})
subclone_sizes = count_num_pops(df)
figs.append({
'data': go.Bar(x=subclone_sizes[0], y=subclone_sizes[1]),
'layout': {
'xaxis': {'title': 'Number of populations in subclone', 'type': 'category'},
'yaxis': {'title': 'Count'},
}
})
figs.append(px.histogram(
df,
x = 'largest_eta',
labels = {
'largest_eta': 'Largest η for subpopulation across cancer samples',
},
))
figs[-1].update_xaxes(range = (0, 1))
figs[-1].update_yaxes(title = 'Count')
html = ''
for idx, F in enumerate(figs):
html += to_html(F)
return html
def plot_polyprimary(polyprimary, monoprimary):
fig = {
'data': go.Pie(labels=['Monoprimary', 'Polyprimary'], values=[monoprimary, polyprimary], sort=False),
'layout': { },
}
return to_html(fig)
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--alpha', type=float, default=0.1)
parser.add_argument('-K', type=int, required=True)
parser.add_argument('plot_fn')
parser.add_argument('truth_fns', nargs='+')
args = parser.parse_args()
tree_stats, polyprimary, monoprimary = _process(args.truth_fns)
ts = plot_tree_stats(tree_stats, args.alpha, args.K)
pp = plot_polyprimary(polyprimary, monoprimary)
with open(args.plot_fn, 'w') as F:
print(ts, file=F)
print(pp, file=F)
if __name__ == '__main__':
main()
| [
"numpy.sqrt",
"plotly.io.to_html",
"numpy.arange",
"plotly.graph_objects.Bar",
"numpy.mean",
"argparse.ArgumentParser",
"plotly.graph_objects.Pie",
"numpy.flatnonzero",
"numpy.max",
"pandas.DataFrame",
"resultserializer.Results",
"plotly.express.histogram",
"os.path.dirname",
"numpy.isnan",
"numpy.std",
"plotly.graph_objects.Box",
"util.convert_parents_to_adjmatrix",
"util.make_ancestral_from_adj",
"numpy.sum",
"plotly.graph_objects.Figure",
"numpy.zeros"
] | [((731, 773), 'util.convert_parents_to_adjmatrix', 'util.convert_parents_to_adjmatrix', (['parents'], {}), '(parents)\n', (764, 773), False, 'import util\n'), ((825, 858), 'util.make_ancestral_from_adj', 'util.make_ancestral_from_adj', (['adj'], {}), '(adj)\n', (853, 858), False, 'import util\n'), ((865, 884), 'numpy.sum', 'np.sum', (['anc'], {'axis': '(1)'}), '(anc, axis=1)\n', (871, 884), True, 'import numpy as np\n'), ((977, 1011), 'resultserializer.Results', 'resultserializer.Results', (['truth_fn'], {}), '(truth_fn)\n', (1001, 1011), False, 'import resultserializer\n'), ((1109, 1128), 'numpy.std', 'np.std', (['phi'], {'axis': '(1)'}), '(phi, axis=1)\n', (1115, 1128), True, 'import numpy as np\n'), ((1142, 1162), 'numpy.mean', 'np.mean', (['phi'], {'axis': '(1)'}), '(phi, axis=1)\n', (1149, 1162), True, 'import numpy as np\n'), ((1513, 1527), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1525, 1527), True, 'import pandas as pd\n'), ((1805, 1993), 'plotly.io.to_html', 'pio.to_html', (['fig'], {'full_html': '(False)', 'include_plotlyjs': '"""cdn"""', 'include_mathjax': '"""cdn"""', 'config': "{'showLink': True, 'toImageButtonOptions': {'format': 'svg', 'width': 750,\n 'height': 450}}"}), "(fig, full_html=False, include_plotlyjs='cdn', include_mathjax=\n 'cdn', config={'showLink': True, 'toImageButtonOptions': {'format':\n 'svg', 'width': 750, 'height': 450}})\n", (1816, 1993), True, 'import plotly.io as pio\n'), ((2615, 2770), 'plotly.graph_objects.Figure', 'go.Figure', ([], {'data': 'boxes', 'layout': "{'xaxis': {'title': xtitle, 'type': 'category'}, 'yaxis': {'title': ytitle},\n 'title': title, 'template': 'plotly_white'}"}), "(data=boxes, layout={'xaxis': {'title': xtitle, 'type': 'category'\n }, 'yaxis': {'title': ytitle}, 'title': title, 'template': 'plotly_white'})\n", (2624, 2770), True, 'import plotly.graph_objects as go\n'), ((3423, 3438), 'numpy.arange', 'np.arange', (['(1)', 'K'], {}), '(1, K)\n', (3432, 3438), True, 'import numpy as np\n'), ((3451, 3497), 'numpy.sqrt', 'np.sqrt', (['(C / K * (1 - C / K) / (K * alpha + 1))'], {}), '(C / K * (1 - C / K) / (K * alpha + 1))\n', (3458, 3497), True, 'import numpy as np\n'), ((3528, 3592), 'pandas.DataFrame', 'pd.DataFrame', (["{'C': C, 'sum_var': sum_var, 'sum_mean': sum_mean}"], {}), "({'C': C, 'sum_var': sum_var, 'sum_mean': sum_mean})\n", (3540, 3592), True, 'import pandas as pd\n'), ((4681, 4793), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""LOL HI THERE"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='LOL HI THERE', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (4704, 4793), False, 'import argparse\n'), ((192, 217), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (207, 217), False, 'import os\n'), ((344, 369), 'numpy.zeros', 'np.zeros', (['K'], {'dtype': 'np.int'}), '(K, dtype=np.int)\n', (352, 369), True, 'import numpy as np\n'), ((1421, 1440), 'numpy.sum', 'np.sum', (['(struct == 0)'], {}), '(struct == 0)\n', (1427, 1440), True, 'import numpy as np\n'), ((2210, 2231), 'numpy.sum', 'np.sum', (['(counts == val)'], {}), '(counts == val)\n', (2216, 2231), True, 'import numpy as np\n'), ((2519, 2592), 'plotly.graph_objects.Box', 'go.Box', ([], {'y': 'df[df[x] == N][y]', 'name': 'N', 'boxmean': '(True)', 'marker_color': '"""#6773fa"""'}), "(y=df[df[x] == N][y], name=N, boxmean=True, marker_color='#6773fa')\n", (2525, 2592), True, 'import plotly.graph_objects as go\n'), ((4137, 4251), 
'plotly.express.histogram', 'px.histogram', (['df'], {'x': '"""largest_eta"""', 'labels': "{'largest_eta': 'Largest η for subpopulation across cancer samples'}"}), "(df, x='largest_eta', labels={'largest_eta':\n 'Largest η for subpopulation across cancer samples'})\n", (4149, 4251), True, 'import plotly.express as px\n'), ((4518, 4614), 'plotly.graph_objects.Pie', 'go.Pie', ([], {'labels': "['Monoprimary', 'Polyprimary']", 'values': '[monoprimary, polyprimary]', 'sort': '(False)'}), "(labels=['Monoprimary', 'Polyprimary'], values=[monoprimary,\n polyprimary], sort=False)\n", (4524, 4614), True, 'import plotly.graph_objects as go\n'), ((462, 493), 'numpy.flatnonzero', 'np.flatnonzero', (['(parents == node)'], {}), '(parents == node)\n', (476, 493), True, 'import numpy as np\n'), ((588, 603), 'numpy.isnan', 'np.isnan', (['depth'], {}), '(depth)\n', (596, 603), True, 'import numpy as np\n'), ((3928, 3976), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'subclone_sizes[0]', 'y': 'subclone_sizes[1]'}), '(x=subclone_sizes[0], y=subclone_sizes[1])\n', (3934, 3976), True, 'import plotly.graph_objects as go\n'), ((1329, 1348), 'numpy.max', 'np.max', (['eta'], {'axis': '(1)'}), '(eta, axis=1)\n', (1335, 1348), True, 'import numpy as np\n'), ((2180, 2194), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (2186, 2194), True, 'import numpy as np\n')] |
from setuptools import setup, find_packages
version = '0.2.3'
print(find_packages())
setup(
name='automatia',
packages=find_packages(),
version=version,
description='CLI swiss knife for all kinds of archive sites',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/ShadowJonathan/automatia.py',
# download_url='https://github.com/ShadowJonathan/automatia.py/archive/{}.tar.gz'.format(version),
# keywords=['testing', 'logging', 'example'], # arbitrary keywords
# classifiers=[],
license="MIT",
requires=['six'],
python_requires='>=2.7',
entry_points={
'console_scripts': ['automatia=automatia.cli:main'],
},
)
| [
"setuptools.find_packages"
] | [((70, 85), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (83, 85), False, 'from setuptools import setup, find_packages\n'), ((130, 145), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (143, 145), False, 'from setuptools import setup, find_packages\n')] |
import os
import yaml
PROJECT_CONFIG_PATH = os.path.join(os.curdir, 'erde.yml')
USER_CONFIG_PATH = os.path.join(os.path.expanduser("~"), '.erde.yml')
# to override this config, put .erde.yml into your home folder
CONFIG = {
'routers': {
'local': 'http://localhost:5000'
}
}
# code by Schlomo https://stackoverflow.com/a/15836901/171278
class MergeError(Exception):
pass
def data_merge(a, b):
"""merges b into a and return merged result
NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen"""
key = None
try:
if a is None or isinstance(a, (str, int, float)):
a = b
elif isinstance(a, list):
if isinstance(b, list):
a.extend(b)
else:
a.append(b)
elif isinstance(a, dict):
if isinstance(b, dict):
for key in b:
a[key] = data_merge(a.get(key, None), b[key])
else:
raise MergeError('Cannot merge non-dict "%s" into dict "%s"' % (b, a))
else:
raise MergeError('NOT IMPLEMENTED "%s" into "%s"' % (b, a))
except TypeError as e:
raise MergeError('TypeError "%s" in key "%s" when merging "%s" into "%s"' % (e, key, b, a))
return a
for p in (USER_CONFIG_PATH, PROJECT_CONFIG_PATH):
if not os.path.exists(p):
continue
with open(p) as f:
CONFIG = data_merge(CONFIG, yaml.load(f, Loader=yaml.FullLoader))
| [
"os.path.exists",
"os.path.join",
"yaml.load",
"os.path.expanduser"
] | [((45, 80), 'os.path.join', 'os.path.join', (['os.curdir', '"""erde.yml"""'], {}), "(os.curdir, 'erde.yml')\n", (57, 80), False, 'import os\n'), ((113, 136), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (131, 136), False, 'import os\n'), ((1191, 1208), 'os.path.exists', 'os.path.exists', (['p'], {}), '(p)\n', (1205, 1208), False, 'import os\n'), ((1271, 1307), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (1280, 1307), False, 'import yaml\n')] |
import numpy as np
def all_stuff():
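    # Tiny 2-2-1 sigmoid network trained with hand-rolled backpropagation on the XOR truth table.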
X = np.array([
[0, 0],
[0, 1],
[1, 0],
[1, 1]])
Y = np.array([[0], [1], [1], [0]])
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def sigmoid_derivative(x):
return x * (1 - x)
# variable initialization
epoch = 10000
learning_rate = 0.1
input_layer_neurons = X.shape[1]
hidden_layer_neurons = 2
output_layer_neurons = 1
# weights and bias initialization
weights = np.random.uniform(size=(input_layer_neurons, hidden_layer_neurons))
bias = np.random.uniform(size=(1, hidden_layer_neurons))
weights_out = np.random.uniform(size=(hidden_layer_neurons, output_layer_neurons))
bias_out = np.random.uniform(size=(1, output_layer_neurons))
for i in range(epoch):
# forward propagation
hidden_layer_input = np.dot(X, weights)
hidden_layer_input_biased = hidden_layer_input + bias
hidden_layer_activations = sigmoid(hidden_layer_input_biased)
output_layer_input = np.dot(hidden_layer_activations, weights_out)
output_layer_input_biased = output_layer_input + bias_out
output = sigmoid(output_layer_input_biased)
# back propagation
E = Y - output
slope_output_layer = sigmoid_derivative(output)
slope_hidden_layer = sigmoid_derivative(hidden_layer_activations)
d_output = E * slope_output_layer
E_at_hidden_layer = d_output.dot(weights_out.T)
d_hidden_layer = E_at_hidden_layer * slope_hidden_layer
weights_out += hidden_layer_activations.T.dot(d_output) * learning_rate
bias_out += np.sum(d_output, axis=0, keepdims=True) * learning_rate
weights += X.T.dot(d_hidden_layer) * learning_rate
bias += np.sum(d_hidden_layer, axis=0, keepdims=True) * learning_rate
if i % 1000 == 0:
print("Output\n",output)
print("Error\n", E)
print("Weights\n", weights)
if __name__ == '__main__':
all_stuff() | [
"numpy.exp",
"numpy.array",
"numpy.dot",
"numpy.sum",
"numpy.random.uniform"
] | [((45, 87), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 0], [1, 1]]'], {}), '([[0, 0], [0, 1], [1, 0], [1, 1]])\n', (53, 87), True, 'import numpy as np\n'), ((134, 164), 'numpy.array', 'np.array', (['[[0], [1], [1], [0]]'], {}), '([[0], [1], [1], [0]])\n', (142, 164), True, 'import numpy as np\n'), ((518, 585), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(input_layer_neurons, hidden_layer_neurons)'}), '(size=(input_layer_neurons, hidden_layer_neurons))\n', (535, 585), True, 'import numpy as np\n'), ((597, 646), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1, hidden_layer_neurons)'}), '(size=(1, hidden_layer_neurons))\n', (614, 646), True, 'import numpy as np\n'), ((665, 733), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(hidden_layer_neurons, output_layer_neurons)'}), '(size=(hidden_layer_neurons, output_layer_neurons))\n', (682, 733), True, 'import numpy as np\n'), ((749, 798), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1, output_layer_neurons)'}), '(size=(1, output_layer_neurons))\n', (766, 798), True, 'import numpy as np\n'), ((890, 908), 'numpy.dot', 'np.dot', (['X', 'weights'], {}), '(X, weights)\n', (896, 908), True, 'import numpy as np\n'), ((1070, 1115), 'numpy.dot', 'np.dot', (['hidden_layer_activations', 'weights_out'], {}), '(hidden_layer_activations, weights_out)\n', (1076, 1115), True, 'import numpy as np\n'), ((1685, 1724), 'numpy.sum', 'np.sum', (['d_output'], {'axis': '(0)', 'keepdims': '(True)'}), '(d_output, axis=0, keepdims=True)\n', (1691, 1724), True, 'import numpy as np\n'), ((1816, 1861), 'numpy.sum', 'np.sum', (['d_hidden_layer'], {'axis': '(0)', 'keepdims': '(True)'}), '(d_hidden_layer, axis=0, keepdims=True)\n', (1822, 1861), True, 'import numpy as np\n'), ((214, 224), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (220, 224), True, 'import numpy as np\n')] |
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
import seaborn as sns
import os
import sys
import datetime
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
import numpy as np
class FeatureDecomposer:
def __init__(self):
pass
def decompose_features(self,config,input_df):
config = config["decomposer"]
decomposer = config["type"]
if config["engine"] == "training":
if decomposer == "PCA":
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(input_df)
principalDf = pd.DataFrame(data = principalComponents, columns = ['pc1', 'pc2'])
else:
            ## Here the saved pca model will be loaded for inference
pass
return principalDf
# dc = FeatureDecomposer()
# decomposed_features = dc.decompose_features(config,scaled_df) | [
"sklearn.decomposition.PCA",
"warnings.filterwarnings",
"pandas.DataFrame"
] | [((36, 69), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (59, 69), False, 'import warnings\n'), ((632, 651), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (635, 651), False, 'from sklearn.decomposition import PCA\n'), ((748, 810), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'principalComponents', 'columns': "['pc1', 'pc2']"}), "(data=principalComponents, columns=['pc1', 'pc2'])\n", (760, 810), True, 'import pandas as pd\n')] |
from torch import Tensor, relu, tanh
import torch.nn as nn
from typing import Any, Optional
from torch.nn.functional import softplus
import torch
from torch.distributions import Independent, Normal, TransformedDistribution
from torch.distributions.transforms import TanhTransform
from numpy import ndarray
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
LOG_SIG_MAX = 2
LOG_SIG_MIN = -20
class Policy(nn.Module):
"""
Gaussian policy
For each action in the action space, output a scalar in [-1, 1]
"""
def __init__(
self,
state_dim: int,
action_dim: int,
hidden_dim: int,
max_action: float,
adam_kwargs: dict[str, Any],
log_sigmas_post: str,
):
"""
:param state_dim: Dimension of the observation space
:param action_dim: Dimension of the action space
:param hidden_dim: Hidden dimension size in 2-layer NN
:param max_action: Max action scaling (after tanh)
:param adam_kwargs: keyword arguments for adam optimizer
:param log_sigmas_post: post-processing of log_sigmas (clipping/clamping or softplus)
"""
super(Policy, self).__init__()
self.l1 = nn.Linear(state_dim, hidden_dim)
self.l2 = nn.Linear(hidden_dim, hidden_dim)
# hidden_dim -> mu0, mu1, ..., sig0, sig1, ...
self.l3 = nn.Linear(hidden_dim, action_dim * 2)
self.action_dim = action_dim
self.max_action = max_action
self.optimizer = torch.optim.Adam(self.parameters(), **adam_kwargs)
assert log_sigmas_post in ["softplus", "clamp"]
self.log_sigmas_post = log_sigmas_post
def forward(
self, state: Tensor, deterministic: bool
) -> tuple[Tensor, Optional[tuple[Tensor, Tensor, Tensor]]]:
"""
Returns (action, (log_prob(action), mus, log_sigmas)).
mus and log_sigmas are passed along for regularization.
"""
h = relu(self.l1(state))
h = relu(self.l2(h))
h = self.l3(h)
mus = h[:, : self.action_dim]
log_sigmas = h[:, self.action_dim :]
# apply softplus and add epsilon
# https://github.com/rail-berkeley/softlearning/blob/master/softlearning/policies/gaussian_policy.py line 276
if self.log_sigmas_post == "softplus":
log_sigmas = softplus(log_sigmas) + 1e-5
elif self.log_sigmas_post == "clamp":
log_sigmas = torch.clamp(log_sigmas, min=LOG_SIG_MIN, max=LOG_SIG_MAX)
else:
raise Exception(f"bad option for log sigma post processing: {self.log_sigmas_post=}")
normal = TransformedDistribution(
Independent(Normal(mus, torch.exp(log_sigmas)), 1), [TanhTransform(cache_size=1)]
)
if deterministic:
return self.max_action * tanh(normal.base_dist.mean), None
else:
# reparametrization trick, sampling closely following the original implementation
actions = normal.rsample() # (b, action space dim)
log_probs = normal.log_prob(actions).unsqueeze(-1) # (b, 1)
if self.max_action == 0.0: # only for debugging
log_probs *= 0.0
return self.max_action * actions, (log_probs, mus, log_sigmas)
def get_action(self, state: ndarray):
"""
Get action for evaluation of policy
"""
with torch.no_grad():
action, _ = self.forward(
torch.tensor(state, dtype=torch.float32, device=device).unsqueeze(0),
deterministic=True,
)
return action.cpu().numpy()
class Value(nn.Module):
"""
    Value function network gets the state, outputs a single scalar (soft value)
"""
def __init__(self, state_dim: int, hidden_dim: int, adam_kwargs: dict[str, Any]):
super(Value, self).__init__()
self.l1 = nn.Linear(state_dim, hidden_dim)
self.l2 = nn.Linear(hidden_dim, hidden_dim)
self.l3 = nn.Linear(hidden_dim, 1)
self.optimizer = torch.optim.Adam(self.parameters(), **adam_kwargs)
def forward(self, state: Tensor) -> Tensor:
h = relu(self.l1(state))
h = relu(self.l2(h))
return self.l3(h)
class Q(nn.Module):
"""
Q function network gets the state and action, outputs a single scalar (state-action value)
"""
def __init__(
self, state_dim: int, action_dim: int, hidden_dim: int, adam_kwargs: dict[str, Any]
):
super(Q, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, hidden_dim)
self.l2 = nn.Linear(hidden_dim, hidden_dim)
self.l3 = nn.Linear(hidden_dim, 1)
self.optimizer = torch.optim.Adam(self.parameters(), **adam_kwargs)
def forward(self, state: Tensor, action: Tensor) -> Tensor:
h = relu(self.l1(torch.cat((state, action), dim=1)))
h = relu(self.l2(h))
return self.l3(h)
| [
"torch.tanh",
"torch.exp",
"torch.nn.functional.softplus",
"torch.cat",
"torch.cuda.is_available",
"torch.tensor",
"torch.nn.Linear",
"torch.no_grad",
"torch.distributions.transforms.TanhTransform",
"torch.clamp"
] | [((339, 364), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (362, 364), False, 'import torch\n'), ((1232, 1264), 'torch.nn.Linear', 'nn.Linear', (['state_dim', 'hidden_dim'], {}), '(state_dim, hidden_dim)\n', (1241, 1264), True, 'import torch.nn as nn\n'), ((1283, 1316), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'hidden_dim'], {}), '(hidden_dim, hidden_dim)\n', (1292, 1316), True, 'import torch.nn as nn\n'), ((1390, 1427), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', '(action_dim * 2)'], {}), '(hidden_dim, action_dim * 2)\n', (1399, 1427), True, 'import torch.nn as nn\n'), ((3912, 3944), 'torch.nn.Linear', 'nn.Linear', (['state_dim', 'hidden_dim'], {}), '(state_dim, hidden_dim)\n', (3921, 3944), True, 'import torch.nn as nn\n'), ((3963, 3996), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'hidden_dim'], {}), '(hidden_dim, hidden_dim)\n', (3972, 3996), True, 'import torch.nn as nn\n'), ((4015, 4039), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', '(1)'], {}), '(hidden_dim, 1)\n', (4024, 4039), True, 'import torch.nn as nn\n'), ((4558, 4603), 'torch.nn.Linear', 'nn.Linear', (['(state_dim + action_dim)', 'hidden_dim'], {}), '(state_dim + action_dim, hidden_dim)\n', (4567, 4603), True, 'import torch.nn as nn\n'), ((4622, 4655), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'hidden_dim'], {}), '(hidden_dim, hidden_dim)\n', (4631, 4655), True, 'import torch.nn as nn\n'), ((4674, 4698), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', '(1)'], {}), '(hidden_dim, 1)\n', (4683, 4698), True, 'import torch.nn as nn\n'), ((3420, 3435), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3433, 3435), False, 'import torch\n'), ((2368, 2388), 'torch.nn.functional.softplus', 'softplus', (['log_sigmas'], {}), '(log_sigmas)\n', (2376, 2388), False, 'from torch.nn.functional import softplus\n'), ((2467, 2524), 'torch.clamp', 'torch.clamp', (['log_sigmas'], {'min': 'LOG_SIG_MIN', 'max': 'LOG_SIG_MAX'}), '(log_sigmas, min=LOG_SIG_MIN, max=LOG_SIG_MAX)\n', (2478, 2524), False, 'import torch\n'), ((2745, 2772), 'torch.distributions.transforms.TanhTransform', 'TanhTransform', ([], {'cache_size': '(1)'}), '(cache_size=1)\n', (2758, 2772), False, 'from torch.distributions.transforms import TanhTransform\n'), ((4866, 4899), 'torch.cat', 'torch.cat', (['(state, action)'], {'dim': '(1)'}), '((state, action), dim=1)\n', (4875, 4899), False, 'import torch\n'), ((2716, 2737), 'torch.exp', 'torch.exp', (['log_sigmas'], {}), '(log_sigmas)\n', (2725, 2737), False, 'import torch\n'), ((2847, 2874), 'torch.tanh', 'tanh', (['normal.base_dist.mean'], {}), '(normal.base_dist.mean)\n', (2851, 2874), False, 'from torch import Tensor, relu, tanh\n'), ((3491, 3546), 'torch.tensor', 'torch.tensor', (['state'], {'dtype': 'torch.float32', 'device': 'device'}), '(state, dtype=torch.float32, device=device)\n', (3503, 3546), False, 'import torch\n')] |
from pygears import gear
@gear(outnames=['dout', 'pred_out'])
def release_after_eot(din: 'w_din',
pred: 'w_pred') -> ('w_din', 'w_pred'):
pass
| [
"pygears.gear"
] | [((28, 63), 'pygears.gear', 'gear', ([], {'outnames': "['dout', 'pred_out']"}), "(outnames=['dout', 'pred_out'])\n", (32, 63), False, 'from pygears import gear\n')] |
# Copyright (C) 2011-2012 <NAME> and The Pepper Developers
# Released under the MIT License. See the file COPYING.txt for details.
from nose.tools import *
from libpepper import builtins
from libpepper.environment import PepEnvironment
from libpepper.cpp.cppvalues import *
from libpepper.cpp.cpprenderer import PepCppRenderer
def test_Int_evaluated_type():
env = PepEnvironment( None )
assert_equal( PepInt( '3' ).evaluated_type( env ), PepType( PepInt ) )
def test_Float_evaluated_type():
env = PepEnvironment( None )
assert_equal( PepFloat( '3.0' ).evaluated_type( env ), PepType( PepFloat ) )
def test_Bool_evaluated_type():
env = PepEnvironment( None )
assert_equal( PepBool( True ).evaluated_type( env ), PepType( PepBool ) )
def test_Variable_evaluated_type():
env = PepEnvironment( None )
assert_equal(
PepVariable( PepType( PepString ), "x" ).evaluated_type( env ),
PepType( PepString )
)
def test_Plus_evaluated_type():
env = PepEnvironment( None )
plus = PepPlus( PepInt( "1" ), PepInt( "2" ) )
assert_equal( plus.evaluated_type( env ), PepType( PepInt ) )
def test_Times_evaluated_type():
env = PepEnvironment( None )
times = PepTimes( PepInt( "1" ), PepInt( "2" ) )
assert_equal( times.evaluated_type( env ), PepType( PepInt ) )
def test_Known_Symbol_evaluated_type():
env = PepEnvironment( None )
init = PepInit( PepType( PepBool ), PepSymbol( "x" ), PepBool( True ) )
init.evaluate( env )
assert_equal( PepSymbol( "x" ).evaluated_type( env ), PepType( PepBool ) )
def test_Unknown_Symbol_evaluated_type():
env = PepEnvironment( None )
init = PepInit( PepType( PepString ), PepSymbol( "x" ), PepVariable(
PepType( PepString ), "x" ) )
init.evaluate( env )
assert_equal( PepSymbol( "x" ).evaluated_type( env ), PepType( PepString ) )
def test_FunctionCall_evaluated_type():
env = PepEnvironment( None )
func = PepUserFunction( "myfunc", PepType( PepBool ), (), ( PepPass(), ) )
assert_equal(
PepFunctionCall( func, () ).evaluated_type( env ),
PepType( PepBool )
)
def test_GreaterThan_evaluated_type():
env = PepEnvironment( None )
value = PepGreaterThan( PepInt( "4" ), PepInt( "5" ) )
assert_equal( value.evaluated_type( env ), PepType( PepBool ) )
def GreaterThan_is_true_if_larger__test():
env = PepEnvironment( None )
assert_true(
PepGreaterThan(
PepInt( "6" ), PepInt( "4" )
).evaluate( env ).value
)
assert_true(
PepGreaterThan(
PepFloat( "6.1" ), PepFloat( "6.0" )
).evaluate( env ).value
)
def GreaterThan_is_false_if_equal__test():
env = PepEnvironment( None )
assert_false(
PepGreaterThan(
PepInt( "4" ), PepInt( "4" )
).evaluate( env ).value
)
assert_false(
PepGreaterThan(
PepFloat( "4.3" ), PepFloat( "4.3" )
).evaluate( env ).value
)
def GreaterThan_is_false_if_smaller__test():
env = PepEnvironment( None )
assert_false(
PepGreaterThan(
PepInt( "2" ), PepInt( "4" )
).evaluate( env ).value
)
assert_false(
PepGreaterThan(
PepFloat( "2.2" ), PepFloat( "4.4" )
).evaluate( env ).value
)
def test_LessThan_evaluated_type():
env = PepEnvironment( None )
value = PepLessThan( PepInt( "4" ), PepInt( "5" ) )
assert_equal( value.evaluated_type( env ), PepType( PepBool ) )
def LessThan_is_true_if_smaller__test():
env = PepEnvironment( None )
assert_true(
PepLessThan(
PepInt( "3" ), PepInt( "4" )
).evaluate( env ).value
)
assert_true(
PepLessThan(
PepFloat( "3.3" ), PepFloat( "3.4" )
).evaluate( env ).value
)
def LessThan_is_false_if_equal__test():
env = PepEnvironment( None )
assert_false(
PepLessThan(
PepFloat( "4.8" ), PepFloat( "4.8" )
).evaluate( env ).value
)
assert_false(
PepLessThan(
PepInt( "4" ), PepInt( "4" )
).evaluate( env ).value
)
def LessThan_is_false_if_larger__test():
env = PepEnvironment( None )
assert_false(
PepLessThan(
PepInt( "10" ), PepInt( "8" )
).evaluate( env ).value
)
assert_false(
PepLessThan(
PepFloat( "10.0" ), PepFloat( "8.1" )
).evaluate( env ).value
)
| [
"libpepper.environment.PepEnvironment"
] | [((373, 393), 'libpepper.environment.PepEnvironment', 'PepEnvironment', (['None'], {}), '(None)\n', (387, 393), False, 'from libpepper.environment import PepEnvironment\n'), ((515, 535), 'libpepper.environment.PepEnvironment', 'PepEnvironment', (['None'], {}), '(None)\n', (529, 535), False, 'from libpepper.environment import PepEnvironment\n'), ((662, 682), 'libpepper.environment.PepEnvironment', 'PepEnvironment', (['None'], {}), '(None)\n', (676, 682), False, 'from libpepper.environment import PepEnvironment\n'), ((810, 830), 'libpepper.environment.PepEnvironment', 'PepEnvironment', (['None'], {}), '(None)\n', (824, 830), False, 'from libpepper.environment import PepEnvironment\n'), ((1002, 1022), 'libpepper.environment.PepEnvironment', 'PepEnvironment', (['None'], {}), '(None)\n', (1016, 1022), False, 'from libpepper.environment import PepEnvironment\n'), ((1187, 1207), 'libpepper.environment.PepEnvironment', 'PepEnvironment', (['None'], {}), '(None)\n', (1201, 1207), False, 'from libpepper.environment import PepEnvironment\n'), ((1383, 1403), 'libpepper.environment.PepEnvironment', 'PepEnvironment', (['None'], {}), '(None)\n', (1397, 1403), False, 'from libpepper.environment import PepEnvironment\n'), ((1641, 1661), 'libpepper.environment.PepEnvironment', 'PepEnvironment', (['None'], {}), '(None)\n', (1655, 1661), False, 'from libpepper.environment import PepEnvironment\n'), ((1935, 1955), 'libpepper.environment.PepEnvironment', 'PepEnvironment', (['None'], {}), '(None)\n', (1949, 1955), False, 'from libpepper.environment import PepEnvironment\n'), ((2199, 2219), 'libpepper.environment.PepEnvironment', 'PepEnvironment', (['None'], {}), '(None)\n', (2213, 2219), False, 'from libpepper.environment import PepEnvironment\n'), ((2403, 2423), 'libpepper.environment.PepEnvironment', 'PepEnvironment', (['None'], {}), '(None)\n', (2417, 2423), False, 'from libpepper.environment import PepEnvironment\n'), ((2728, 2748), 'libpepper.environment.PepEnvironment', 'PepEnvironment', (['None'], {}), '(None)\n', (2742, 2748), False, 'from libpepper.environment import PepEnvironment\n'), ((3057, 3077), 'libpepper.environment.PepEnvironment', 'PepEnvironment', (['None'], {}), '(None)\n', (3071, 3077), False, 'from libpepper.environment import PepEnvironment\n'), ((3377, 3397), 'libpepper.environment.PepEnvironment', 'PepEnvironment', (['None'], {}), '(None)\n', (3391, 3397), False, 'from libpepper.environment import PepEnvironment\n'), ((3577, 3597), 'libpepper.environment.PepEnvironment', 'PepEnvironment', (['None'], {}), '(None)\n', (3591, 3597), False, 'from libpepper.environment import PepEnvironment\n'), ((3893, 3913), 'libpepper.environment.PepEnvironment', 'PepEnvironment', (['None'], {}), '(None)\n', (3907, 3913), False, 'from libpepper.environment import PepEnvironment\n'), ((4212, 4232), 'libpepper.environment.PepEnvironment', 'PepEnvironment', (['None'], {}), '(None)\n', (4226, 4232), False, 'from libpepper.environment import PepEnvironment\n')] |
#! /usr/bin/env python
import nmrglue.fileio.pipe as pipe
import nmrglue.process.pipe_proc as p
d,a = pipe.read("1D_time.fid")
d,a = p.ha(d,a)
pipe.write("ha1.glue",d,a,overwrite=True)
d,a = pipe.read("1D_time.fid")
d,a = p.ha(d,a,inv=True)
pipe.write("ha2.glue",d,a,overwrite=True)
| [
"nmrglue.fileio.pipe.write",
"nmrglue.process.pipe_proc.ha",
"nmrglue.fileio.pipe.read"
] | [((104, 128), 'nmrglue.fileio.pipe.read', 'pipe.read', (['"""1D_time.fid"""'], {}), "('1D_time.fid')\n", (113, 128), True, 'import nmrglue.fileio.pipe as pipe\n'), ((135, 145), 'nmrglue.process.pipe_proc.ha', 'p.ha', (['d', 'a'], {}), '(d, a)\n', (139, 145), True, 'import nmrglue.process.pipe_proc as p\n'), ((145, 189), 'nmrglue.fileio.pipe.write', 'pipe.write', (['"""ha1.glue"""', 'd', 'a'], {'overwrite': '(True)'}), "('ha1.glue', d, a, overwrite=True)\n", (155, 189), True, 'import nmrglue.fileio.pipe as pipe\n'), ((194, 218), 'nmrglue.fileio.pipe.read', 'pipe.read', (['"""1D_time.fid"""'], {}), "('1D_time.fid')\n", (203, 218), True, 'import nmrglue.fileio.pipe as pipe\n'), ((225, 245), 'nmrglue.process.pipe_proc.ha', 'p.ha', (['d', 'a'], {'inv': '(True)'}), '(d, a, inv=True)\n', (229, 245), True, 'import nmrglue.process.pipe_proc as p\n'), ((244, 288), 'nmrglue.fileio.pipe.write', 'pipe.write', (['"""ha2.glue"""', 'd', 'a'], {'overwrite': '(True)'}), "('ha2.glue', d, a, overwrite=True)\n", (254, 288), True, 'import nmrglue.fileio.pipe as pipe\n')] |
import io
import logging
from typing import IO
from google.cloud import storage
from audiobot import settings
GOOGLE = settings.Google()
logger = logging.getLogger('storage')
logger.setLevel(logging.INFO)
def _blob(key: str) -> object:
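    # Return a handle to the object stored under `key` in the configured GCS bucket.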
return storage.Client()\
.bucket(GOOGLE.bucket)\
.blob(key)
def download(key: str) -> IO:
logger.info('Downloading: %s', key)
data = io.BytesIO()
_blob(key).download_to_file(data)
data.seek(0)
logger.info('Downloaded: %s', key)
return data
def upload(data: IO, key: str) -> str:
logger.info('Uploading: %s', key)
data.seek(0)
_blob(key).upload_from_file(data)
data.seek(0)
logger.info('Uploaded: %s', key)
return f'gs://{GOOGLE.bucket}/{key}'
def exists(key: str) -> bool:
logger.info('Checking for: %s', key)
result = _blob(key).exists()
    logger.info('Checked for: %s', key)
return result
| [
"logging.getLogger",
"google.cloud.storage.Client",
"io.BytesIO",
"audiobot.settings.Google"
] | [((122, 139), 'audiobot.settings.Google', 'settings.Google', ([], {}), '()\n', (137, 139), False, 'from audiobot import settings\n'), ((150, 178), 'logging.getLogger', 'logging.getLogger', (['"""storage"""'], {}), "('storage')\n", (167, 178), False, 'import logging\n'), ((405, 417), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (415, 417), False, 'import io\n'), ((253, 269), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (267, 269), False, 'from google.cloud import storage\n')] |
import hashlib
from time import time
from requests import Session
from lib.const import response_codes, debug, fetch_time
class FacebookBrowser(object):
account_exists = None
API_SECRET = "<KEY>"
def __init__(self, username, password, proxy):
self.proxy = proxy
self.is_found = False
self.is_active = True
self.is_locked = False
self.start_time = None
self.browser = self.br()
self.username = username
self.password = password
self.is_attempted = False
def br(self):
session = Session()
session.proxies.update(self.proxy.addr if self.proxy else [])
return session
def check_exists(self, response):
pass # TODO
def check_response(self, response):
ok_error_codes = ['', None, 405] # TODO?
if 'error_code' in response and response['error_code'] not in ok_error_codes:
return response_codes['failed']
else:
return response_codes['succeed']
def authenicate(self):
response = self.post_data()
resp = {'attempted': False, 'accessed': False, 'locked': False}
if debug:
print('pass: {} => {}'.format(self.password, response))
if response:
resp['attempted'] = True
resp_code = self.check_response(response)
if resp_code == response_codes['locked']:
resp['locked'] = True
if resp_code == response_codes['succeed']:
resp['accessed'] = True
if FacebookBrowser.account_exists is None:
self.check_exists(response)
return resp
    def attempt(self):  # the only public method
self.start_time = time()
resp = self.authenicate()
if resp['attempted']:
self.is_attempted = True
if not resp['locked']:
if resp['accessed']:
self.is_found = True
else:
self.is_locked = True
self.close()
def close(self):
self.is_active = False
def post_data(self):
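        # Legacy Facebook REST API auth.login request, signed with an md5 of the
        # concatenated key=value pairs followed by the app secret.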
sig = "api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail={}format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword={}return_ssl_resources=0v=1.0{}"\
.format(self.username, self.password, FacebookBrowser.API_SECRET)
xx = hashlib.md5(sig.encode()).hexdigest()
data = "api_key=882a8490361da98702bf97a021ddc14d&credentials_type=password&email={}&format=JSON&generate_machine_id=1&generate_session_cookies=1&locale=en_US&method=auth.login&password={}&return_ssl_resources=0&v=1.0&sig={}"\
.format(self.username, self.password, xx)
response = None
try:
response = self.browser.get("https://api.facebook.com/restserver.php?{}".format(data), timeout=fetch_time).json()
except:
pass
finally:
return response
| [
"time.time",
"requests.Session"
] | [((577, 586), 'requests.Session', 'Session', ([], {}), '()\n', (584, 586), False, 'from requests import Session\n'), ((1744, 1750), 'time.time', 'time', ([], {}), '()\n', (1748, 1750), False, 'from time import time\n')] |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree.
import os
import sys
import urlparse
from find_lldb import get_lldb
from handler import HandlerDomain, UndefinedHandlerError, handler
from logging_helper import log_debug
import file_manager
class DebuggerDomain(HandlerDomain):
'''Implement Chrome debugger domain protocol and
convert into lldb python API.
'''
def __init__(self, runtimeDomain, **kwargs):
HandlerDomain.__init__(self, **kwargs)
self.runtimeDomain = runtimeDomain
self.exceptionBreakpointId = None
@property
def name(self):
return 'Debugger'
@handler()
def canSetScriptSource(self, params):
# Return False, becuase we don't support
# changing source at runtime.
return {"result": False}
@handler()
def continueToLocation(self, params):
filelike = self.debugger_store.file_manager.get_by_client_url(params['location']['scriptId'])
if not filelike or not isinstance(filelike, file_manager.File):
# Only support setting breakpoints in real files.
return {}
lldb = get_lldb()
path = str(params['location']['scriptId'])
thread = self.debugger_store.debugger.GetSelectedTarget().GetProcess().GetSelectedThread()
frame = thread.GetSelectedFrame()
# atom line numbers a 0-based, while lldb is 1-based
line = int(params['location']['lineNumber']) + 1
thread.StepOverUntil(frame, filelike.server_obj, line)
return {}
@handler()
def disable(self, params):
# Not exactly the same as disable. Detach() might be closer to
# what Chrome Dev Tools is trying to do.
self.debugger_store.debugger.GetSelectedTarget().DisableAllBreakpoints()
return {}
@handler()
def enable(self, params):
formatter_path = os.path.join(os.path.dirname(__file__), 'data_formatter.py')
self.debugger_store.debugger.HandleCommand('command script import %s' % formatter_path)
self.debugger_store.chrome_channel.enable()
return {}
@handler()
def evaluateOnCallFrame(self, params):
frameId = params['callFrameId']
thread, frame = frameId.split('.')
# TODO: These return booleans to indicate success. Throw something if False.
process = self.debugger_store.debugger.GetSelectedTarget().process
process.SetSelectedThreadByIndexID(int(thread))
process.GetSelectedThread().SetSelectedFrame(int(frame))
return self.runtimeDomain.evaluate(params)
@handler()
def pause(self, params):
self.debugger_store.debugger.GetSelectedTarget().process.Stop()
return {}
@handler()
def removeBreakpoint(self, params):
self.debugger_store.debugger.GetSelectedTarget().BreakpointDelete(int(params['breakpointId']))
return {}
@handler()
def resume(self, params):
self.debugger_store.debugger.GetSelectedTarget().process.Continue()
return {}
@handler()
def selectThread(self, params):
threadId = params['threadId']
self.debugger_store.debugger.GetSelectedTarget().process.SetSelectedThreadByID(threadId)
return {}
@handler()
def getThreadStack(self, params):
threadId = params['threadId']
thread = self.debugger_store.debugger.GetSelectedTarget().process.GetThreadByID(threadId)
params = { "callFrames": [] }
if not thread == None:
params["callFrames"] = self.debugger_store.thread_manager.get_thread_stack(thread)
return params
@handler()
def searchInContent(self, params):
raise UndefinedHandlerError('searchInContent not implemented')
@handler()
def setBreakpoint(self, params):
filelike = self.debugger_store.file_manager.get_by_script_id(params['location']['scriptId'])
if not filelike or not isinstance(filelike, file_manager.File):
# Only support setting breakpoints in real files.
return {}
return self._set_breakpoint_by_filespec(
filelike.server_obj,
int(params['location']['lineNumber']) + 1)
@handler()
def setBreakpointByUrl(self, params):
# Use source file name to set breakpoint.
parsed_url = urlparse.urlparse(params['url'])
return self._set_breakpoint_by_source_path(
str(os.path.basename(parsed_url.path)),
int(params['lineNumber']) + 1,
str(params['condition']))
@handler()
def setBreakpointsActive(self, params):
if params['active']:
self.debugger_store.debugger.GetSelectedTarget().EnableAllBreakpoints()
else:
self.debugger_store.debugger.GetSelectedTarget().DisableAllBreakpoints()
return {}
@handler()
def setPauseOnExceptions(self, params):
# First, unhook the old breakpoint exceptions.
if self.exceptionBreakpointId is not None:
self.debugger_store.debugger.GetSelectedTarget().BreakpointDelete(
self.exceptionBreakpointId)
self.exceptionBreakpointId = None
# Next, we've been asked to do one of 'none' or 'uncaught' or 'all'.
# But we'll treat none+uncaught as no-op since that's all LLDB can do.
if params['state'] == 'all':
breakpoint = self.debugger_store.debugger.GetSelectedTarget(
).BreakpointCreateForException(get_lldb().eLanguageTypeC_plus_plus,
False, # don't pause on catch
True # do pause on throw
)
self.exceptionBreakpointId = breakpoint.id
return {}
@handler()
def setScriptSource(self, params):
raise UndefinedHandlerError('setScriptSource not supported for LLDB')
@handler()
def stepInto(self, params):
thread = self.debugger_store.debugger.GetSelectedTarget().GetProcess().GetSelectedThread()
flag = self._getSteppingFlag()
thread.StepInto(flag)
return {}
@handler()
def stepOut(self, params):
self.debugger_store.debugger.GetSelectedTarget().GetProcess().GetSelectedThread().StepOut()
return {}
@handler()
def stepOver(self, params):
thread = self.debugger_store.debugger.GetSelectedTarget().GetProcess().GetSelectedThread()
flag = self._getSteppingFlag()
thread.StepOver(flag)
return {}
@handler()
def setDebuggerSettings(self, params):
self.debugger_store.setDebuggerSettings(params)
return {}
def _set_breakpoint_by_filespec(self, filespec, line):
breakpoint = self.debugger_store.debugger.GetSelectedTarget().BreakpointCreateByLocation(filespec, line)
return {
'breakpointId': str(breakpoint.id),
'locations':
self.debugger_store.location_serializer.get_breakpoint_locations(breakpoint),
}
def _getSteppingFlag(self):
lldb = get_lldb()
if self.debugger_store.getDebuggerSettings()['singleThreadStepping']:
return lldb.eOnlyThisThread
return lldb.eOnlyDuringStepping
def _set_breakpoint_by_source_path(self, source_path, line, condition):
breakpoint = self.debugger_store.debugger.GetSelectedTarget().BreakpointCreateByLocation(
source_path,
line)
if condition: # empty string is falsy.
breakpoint.SetCondition(condition)
return {
'breakpointId': str(breakpoint.id),
'locations': self.debugger_store.location_serializer.get_breakpoint_locations(breakpoint),
}
| [
"handler.UndefinedHandlerError",
"handler.handler",
"find_lldb.get_lldb",
"os.path.dirname",
"os.path.basename",
"handler.HandlerDomain.__init__",
"urlparse.urlparse"
] | [((764, 773), 'handler.handler', 'handler', ([], {}), '()\n', (771, 773), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((942, 951), 'handler.handler', 'handler', ([], {}), '()\n', (949, 951), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((1676, 1685), 'handler.handler', 'handler', ([], {}), '()\n', (1683, 1685), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((1942, 1951), 'handler.handler', 'handler', ([], {}), '()\n', (1949, 1951), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((2240, 2249), 'handler.handler', 'handler', ([], {}), '()\n', (2247, 2249), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((2716, 2725), 'handler.handler', 'handler', ([], {}), '()\n', (2723, 2725), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((2851, 2860), 'handler.handler', 'handler', ([], {}), '()\n', (2858, 2860), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((3028, 3037), 'handler.handler', 'handler', ([], {}), '()\n', (3035, 3037), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((3168, 3177), 'handler.handler', 'handler', ([], {}), '()\n', (3175, 3177), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((3373, 3382), 'handler.handler', 'handler', ([], {}), '()\n', (3380, 3382), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((3749, 3758), 'handler.handler', 'handler', ([], {}), '()\n', (3756, 3758), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((3875, 3884), 'handler.handler', 'handler', ([], {}), '()\n', (3882, 3884), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((4322, 4331), 'handler.handler', 'handler', ([], {}), '()\n', (4329, 4331), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((4669, 4678), 'handler.handler', 'handler', ([], {}), '()\n', (4676, 4678), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((4959, 4968), 'handler.handler', 'handler', ([], {}), '()\n', (4966, 4968), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((5971, 5980), 'handler.handler', 'handler', ([], {}), '()\n', (5978, 5980), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((6104, 6113), 'handler.handler', 'handler', ([], {}), '()\n', (6111, 6113), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((6338, 6347), 'handler.handler', 'handler', ([], {}), '()\n', (6345, 6347), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((6503, 6512), 'handler.handler', 'handler', ([], {}), '()\n', (6510, 6512), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((6737, 6746), 'handler.handler', 'handler', ([], {}), '()\n', (6744, 6746), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((573, 611), 'handler.HandlerDomain.__init__', 'HandlerDomain.__init__', (['self'], {}), '(self, **kwargs)\n', (595, 611), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((1268, 1278), 'find_lldb.get_lldb', 'get_lldb', ([], {}), '()\n', (1276, 1278), False, 'from find_lldb import get_lldb\n'), ((3812, 3868), 'handler.UndefinedHandlerError', 'UndefinedHandlerError', (['"""searchInContent not 
implemented"""'], {}), "('searchInContent not implemented')\n", (3833, 3868), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((4445, 4477), 'urlparse.urlparse', 'urlparse.urlparse', (["params['url']"], {}), "(params['url'])\n", (4462, 4477), False, 'import urlparse\n'), ((6034, 6097), 'handler.UndefinedHandlerError', 'UndefinedHandlerError', (['"""setScriptSource not supported for LLDB"""'], {}), "('setScriptSource not supported for LLDB')\n", (6055, 6097), False, 'from handler import HandlerDomain, UndefinedHandlerError, handler\n'), ((7279, 7289), 'find_lldb.get_lldb', 'get_lldb', ([], {}), '()\n', (7287, 7289), False, 'from find_lldb import get_lldb\n'), ((2020, 2045), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2035, 2045), False, 'import os\n'), ((4546, 4579), 'os.path.basename', 'os.path.basename', (['parsed_url.path'], {}), '(parsed_url.path)\n', (4562, 4579), False, 'import os\n'), ((5641, 5651), 'find_lldb.get_lldb', 'get_lldb', ([], {}), '()\n', (5649, 5651), False, 'from find_lldb import get_lldb\n')] |
from pathlib import Path
__all__ = ["elements_data"]
main_path = Path(__file__).parent
elements_data = (main_path / Path(r"elements.txt")).resolve()
| [
"pathlib.Path"
] | [((67, 81), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (71, 81), False, 'from pathlib import Path\n'), ((118, 138), 'pathlib.Path', 'Path', (['"""elements.txt"""'], {}), "('elements.txt')\n", (122, 138), False, 'from pathlib import Path\n')] |
from yacs.config import CfgNode as CN
_C = CN()
# Data
_C.DATA = CN()
_C.DATA.KITS19_DIR = '/data/kits19/data'
_C.DATA.TRAIN_CASES = list(range(0, 210))
_C.DATA.TEST_CASES = list(range(210, 300))
_C.DATA.CASES_TO_EXCLUDE = [15, 23, 37, 68, 125, 133] # see 'An attempt at beating the 3D U-Net' paper
_C.DATA.KITS19_RESAMPLED_DIR = '/data/kits19_resampled'
_C.DATA.SPACING = [3.22, 1.62, 1.62] # XXX: axis along the vertical slice comes the first
_C.DATA.FOLD_NUM = 5
_C.DATA.FOLD_SEED = 777
_C.DATA.FOLD_ID = 0
# DataLoader
_C.DATALOADER = CN()
_C.DATALOADER.TRAIN_BATCH_SIZE = 1 # 2 in 'An attempt at beating the 3D U-Net' paper
_C.DATALOADER.TRAIN_NUM_WORKERS = 1
_C.DATALOADER.VAL_BATCH_SIZE = 1 # 2 in 'An attempt at beating the 3D U-Net' paper
_C.DATALOADER.VAL_NUM_WORKERS = 1
_C.DATALOADER.SHUFFLE_SEED = 777
# Transform
_C.TRANSFORM = CN()
# intensity normalization
intensity_min = -79.0
intensity_max = 304.0
intensity_mean = 101.0
intensity_std = 76.9
_C.TRANSFORM.INTENSITY_MIN = intensity_min
_C.TRANSFORM.INTENSITY_MAX = intensity_max
_C.TRANSFORM.INTENSITY_MEAN = intensity_mean
_C.TRANSFORM.INTENSITY_STD = intensity_std
# padding and (random) cropping
_C.TRANSFORM.TRAIN_RANDOM_CROP_SIZE = [
160, 160, 80
] # RANDOM crop size BEFORE spatial augmentations for train set. axis along the vertical slice comes the last
_C.TRANSFORM.TRAIN_CROP_SIZE = [
160, 160, 80
] # CENTER crop size AFTER spatial augmentations for train set. axis along the vertical slice comes the last
_C.TRANSFORM.VAL_CROP_SIZE = [
256, 256, 128
] # center crop size for val set (augmentations are not applied). axis along the vertical slice comes the last
_C.TRANSFORM.IMAGE_PAD_MODE = 'constant' # ['constant', 'edge']
_C.TRANSFORM.IMAGE_PAD_VALUE = (intensity_min - intensity_mean) / intensity_std # only valid when mode='constant'
_C.TRANSFORM.LABEL_PAD_VALUE = 0 # only used for train label (val label is padded with IGNORE_LABEL value)
# random elastic deformation
_C.TRANSFORM.ENABLE_ELASTIC = False
_C.TRANSFORM.ELASTIC_SCALE = (0, 0.25)
_C.TRANSFORM.ELASTIC_PROB = 0.2
# random rotation
_C.TRANSFORM.ENABLE_ROTATION = True
_C.TRANSFORM.ROTATION_X = 15.0 # in deg
_C.TRANSFORM.ROTATION_Y = 15.0 # in deg
_C.TRANSFORM.ROTATION_Z = 15.0 # in deg
_C.TRANSFORM.ROTATION_PROB = 0.2
# random scale
_C.TRANSFORM.ENABLE_SCALE = True
_C.TRANSFORM.SCALE_RANGE = (0.85, 1.25)
_C.TRANSFORM.SCALE_PROB = 0.2
# random noises
_C.TRANSFORM.ENABLE_GAUSSIAN = True
_C.TRANSFORM.GAUSSIAN_VARIANCE = (0, 0.1)
_C.TRANSFORM.GAUSSIAN_PROB = 0.1
# random brightness
_C.TRANSFORM.ENABLE_BRIGHTNESS = True
_C.TRANSFORM.BRIGHTNESS_RANGE = (0.75, 1.25)
_C.TRANSFORM.BRIGHTNESS_PROB = 0.15
# random contrast
_C.TRANSFORM.ENABLE_CONTRAST = True
_C.TRANSFORM.CONTRAST_RANGE = (0.75, 1.25)
_C.TRANSFORM.CONTRAST_PROB = 0.15
# random gamma
_C.TRANSFORM.ENABLE_GAMMA = True
_C.TRANSFORM.GAMMA_RANGE = (0.7, 1.5)
_C.TRANSFORM.GAMMA_RETAIN_STATS = True
_C.TRANSFORM.GAMMA_INVERT_IMAGE = True
_C.TRANSFORM.GAMMA_PROB = 0.1
# random seed
_C.TRANSFORM.AUGMENTATION_SEED = 777
# Model
_C.MODEL = CN()
_C.MODEL.NAME = 'plane_unet_3d' # ['plane_unet_3d']
_C.MODEL.INPUT_CHANNELS = 1
_C.MODEL.OUTPUT_CHANNELS = 3
_C.MODEL.BASE_FEATURE_CHANNELS = 30
_C.MODEL.MAX_FEATURE_CHANNELS = 320
_C.MODEL.BASE_MODULE = 'double_conv' # ['double_conv']
_C.MODEL.NUM_LEVELS = 6
_C.MODEL.NORMALIZATION = 'instance_norm' # ['instance_norm', 'batch_norm']
_C.MODEL.NON_LINEARITY = 'leaky_relu' # ['leaky_relu', 'relu']
_C.MODEL.CONV_KERNEL_SIZE = (3, 3, 3)
_C.MODEL.PADDING_WIDTH = (1, 1, 1)
_C.MODEL.FIRST_DOWNSAMPLE_STRIDE = (2, 2, 1)
_C.MODEL.ACTIVATION = 'softmax' # ['softmax', 'sigmoid']
_C.MODEL.INITIALIZER = 'kaiming_normal'
_C.MODEL.DEVICE = 'cuda'
_C.MODEL.WEIGHT = 'none'
# Training
_C.TRAIN = CN()
_C.TRAIN.EPOCHS = 1000
_C.TRAIN.LR = 1e-2
_C.TRAIN.LR_SCHEDULER = 'poly' # ['poly']
_C.TRAIN.LR_POLY_EXPONENT = 0.9
_C.TRAIN.OPTIMIZER = 'sgd' # ['sgd', 'adam']
_C.TRAIN.OPTIMIZER_SGD_MOMENTUM = 0.99
_C.TRAIN.OPTIMIZER_SGD_NESTEROV = True
_C.TRAIN.LOSSES = ['ce', 'dice'] # ['ce', 'dice']
_C.TRAIN.LOSS_WEIGHTS = [1.0, 1.0]
_C.TRAIN.WEIGHT_DECAY = 3e-5
_C.TRAIN.MAIN_VAL_METRIC = 'val/kits19/dice'
_C.TRAIN.VAL_INTERVAL = 2
_C.TRAIN.IGNORE_LABEL = -1
_C.TRAIN.CHECKPOINT_PATH = 'none'
_C.TRAIN.SEED = 777
# Inference
_C.TEST = CN()
_C.TEST.THRESHOLD_KIDNEY = 0.5
_C.TEST.THRESHOLD_TUMOR = 0.5
# Misc
_C.OUTPUT_DIR = './outputs'
def get_default_config():
"""Get default config.
Returns:
YACS CfgNode: default config.
"""
return _C.clone()
| [
"yacs.config.CfgNode"
] | [((44, 48), 'yacs.config.CfgNode', 'CN', ([], {}), '()\n', (46, 48), True, 'from yacs.config import CfgNode as CN\n'), ((67, 71), 'yacs.config.CfgNode', 'CN', ([], {}), '()\n', (69, 71), True, 'from yacs.config import CfgNode as CN\n'), ((544, 548), 'yacs.config.CfgNode', 'CN', ([], {}), '()\n', (546, 548), True, 'from yacs.config import CfgNode as CN\n'), ((850, 854), 'yacs.config.CfgNode', 'CN', ([], {}), '()\n', (852, 854), True, 'from yacs.config import CfgNode as CN\n'), ((3078, 3082), 'yacs.config.CfgNode', 'CN', ([], {}), '()\n', (3080, 3082), True, 'from yacs.config import CfgNode as CN\n'), ((3774, 3778), 'yacs.config.CfgNode', 'CN', ([], {}), '()\n', (3776, 3778), True, 'from yacs.config import CfgNode as CN\n'), ((4310, 4314), 'yacs.config.CfgNode', 'CN', ([], {}), '()\n', (4312, 4314), True, 'from yacs.config import CfgNode as CN\n')] |
#!/usr/bin/env python
# coding: utf-8
import os
import sys
import requests
import regex
import time
import pytz
from datetime import datetime, timedelta
import pandas as pd
from multiprocessing.pool import ThreadPool
import json
import logging
from configparser import ConfigParser
from optparse import OptionParser
import warnings
warnings.filterwarnings('ignore')
'''
This script gets performance metric data from Nagios API and ingests it into an IF metric project
'''
def set_logger_config():
'''
Configure logger object
'''
ISO8601 = ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S', '%Y%m%dT%H%M%SZ', 'epoch']
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging_format = logging.Formatter(
'{ts} [pid {pid}] {lvl} {mod}.{func}():{line} {msg}'.format(
ts='%(asctime)s',
pid='%(process)d',
lvl='%(levelname)-8s',
mod='%(module)s',
func='%(funcName)s',
line='%(lineno)d',
msg='%(message)s'),
ISO8601[0])
logging_handler_out = logging.StreamHandler(sys.stdout)
logging_handler_out.setFormatter(logging_format)
logger.addHandler(logging_handler_out)
return logger
def get_config_vars(config_path):
'''
Get config variables from the config file
'''
if not os.path.exists(config_path):
message = "No config file found. Exiting."
logger.info(message)
sys.exit(1)
config = ConfigParser()
config.read(config_path)
if_vars = {}
if_vars['host_url'] = config.get('insightFinder_vars', 'host_url')
if_vars['http_proxy'] = config.get('insightFinder_vars', 'http_proxy')
if_vars['https_proxy'] = config.get('insightFinder_vars', 'https_proxy')
if_vars['licenseKey'] = config.get('insightFinder_vars', 'licenseKey')
if_vars['project_name'] = config.get('insightFinder_vars', 'project_name')
if_vars['username'] = config.get('insightFinder_vars', 'username')
if_vars['retries'] = config.getint('insightFinder_vars', 'retries')
if_vars['sleep_seconds'] = config.getint('insightFinder_vars', 'sleep_seconds')
nagios_vars = {}
nagios_vars['host_url'] = config.get('nagios_vars', 'host_url')
nagios_vars['http_proxy'] = config.get('nagios_vars', 'http_proxy')
nagios_vars['https_proxy'] = config.get('nagios_vars', 'https_proxy')
nagios_vars['api_key'] = config.get('nagios_vars', 'api_key')
nagios_vars['host_names_list'] = config.get('nagios_vars', 'host_names_list')
nagios_vars['host_names_regex'] = config.get('nagios_vars', 'host_names_regex')
nagios_vars['service_descriptors_list'] = config.get('nagios_vars', 'service_descriptors_list')
nagios_vars['service_descriptors_regex'] = config.get('nagios_vars', 'service_descriptors_regex')
nagios_vars['retries'] = config.getint('nagios_vars', 'retries')
nagios_vars['sleep_seconds'] = config.getint('nagios_vars', 'sleep_seconds')
agent_vars = {}
agent_vars['query_interval'] = config.getint('agent_vars', 'query_interval')
agent_vars['sampling_interval'] = config.getint('agent_vars', 'sampling_interval')
agent_vars['thread_pool'] = config.getint('agent_vars', 'thread_pool')
agent_vars['chunk_size'] = config.getint('agent_vars', 'chunk_size_kb') * 1024
parser = OptionParser()
parser.add_option('-t', '--testing', action='store_true', dest='testing', default=False,
help='Set to testing mode (do not send data).')
(options, args) = parser.parse_args()
return if_vars, nagios_vars, agent_vars, options
def collect_performance_data():
'''
Collect performance metric data from Nagios
'''
logger.info("Collecting host and service descriptor lists.")
hosts = get_hosts()
if len(hosts) == 0:
logger.error('Host list is empty.')
sys.exit(1)
pool_map = ThreadPool(agent_vars['thread_pool'])
services = pool_map.map(get_services, hosts)
services = {service[0]: service[1] for service in services}
pool_map = ThreadPool(agent_vars['thread_pool'])
params = [(host, service) for host in hosts for service in services[host]]
if len(params) == 0:
        logger.warning('No valid combinations of filters found. Exiting.')
sys.exit(1)
logger.info("Collecting the performance data.")
metric_data = pool_map.map(query_nagiosAPI, params)
metric_data = pd.concat(metric_data, axis=1, sort=True)
if len(metric_data) == 0:
logger.warning("No metric data found for the given parameters.")
sys.exit(1)
metric_data.index.name = 'timestamp'
metric_data = metric_data.reset_index()
metric_data = metric_data.astype(str).replace('nan','')
logger.info("Received {} rows from nagios API.".format(len(metric_data)))
#metric_data.to_csv('sample data.csv')
return metric_data
def get_hosts():
'''
Retrieve host names for all hosts being monitored
'''
api = "/nagiosxi/api/v1/objects/hoststatus"
url = nagios_vars['host_url'] + api
params = {
"apikey": nagios_vars['api_key']
}
proxies = {}
if len(nagios_vars['http_proxy']) > 0:
proxies['http'] = nagios_vars['http_proxy']
if len(nagios_vars['https_proxy']) > 0:
proxies['https'] = nagios_vars['https_proxy']
attempts = 1
response = requests.get(url, params=params, proxies=proxies, verify=False)
while response.status_code != 200 and attempts < nagios_vars['retries']:
logger.info("Failed to get host names. Retrying in {} seconds.".format(
nagios_vars['sleep_seconds']))
time.sleep(nagios_vars['sleep_seconds'])
response = requests.get(url, params=params, proxies=proxies, verify=False)
attempts += 1
if response.status_code != 200:
logger.warning("Failed to get host names in {} attempts.".format(
nagios_vars['retries']))
logger.info('Response Code: {}\nTEXT: {}'.format(
response.status_code, response.text))
sys.exit(1)
result = response.json()
if "error" in result:
logger.warning("Could not retrieve hosts from the API. Error message: {}".format(result['error']))
sys.exit(1)
all_hosts = []
for host in result['hoststatus']:
all_hosts.append(host['host_name'])
if nagios_vars['host_names_list']:
hosts = nagios_vars['host_names_list'].split(',')
hosts = [host.strip() for host in hosts if host.strip() in all_hosts]
elif nagios_vars['host_names_regex']:
re = regex.compile(nagios_vars['host_names_regex'])
hosts = list(filter(re.match, all_hosts))
else:
hosts = []
return hosts
def get_services(host):
'''
Retrieve service descriptions for all services being monitored for a host
'''
api = "/nagiosxi/api/v1/objects/servicestatus"
url = nagios_vars['host_url'] + api
params = {
"apikey": nagios_vars['api_key'],
"host_name": host
}
proxies = {}
if len(nagios_vars['http_proxy']) > 0:
proxies['http'] = nagios_vars['http_proxy']
if len(nagios_vars['https_proxy']) > 0:
proxies['https'] = nagios_vars['https_proxy']
attempts = 1
response = requests.get(url, params=params, proxies=proxies, verify=False)
while response.status_code != 200 and attempts < nagios_vars['retries']:
logger.info("Failed to get services for the host '{}'. Retrying in {} seconds.".format(
host, nagios_vars['sleep_seconds']))
time.sleep(nagios_vars['sleep_seconds'])
response = requests.get(url, params=params, proxies=proxies, verify=False)
attempts += 1
if response.status_code != 200:
logger.warning("Failed to get services for the host '{}' in {} attempts.".format(
host, nagios_vars['retries']))
logger.info('Response Code: {}\nTEXT: {}'.format(
response.status_code, response.text))
sys.exit(1)
result = response.json()
if "error" in result:
logger.warning("Could not retrieve services for the host '{}'. Error message: {}".format(host, result['error']))
return host, []
all_services = []
for service in result['servicestatus']:
all_services.append(service['service_description'])
if nagios_vars['service_descriptors_list']:
services = nagios_vars['service_descriptors_list'].split(',')
services = [service.strip() for service in services if service.strip() in all_services]
elif nagios_vars['service_descriptors_regex']:
re = regex.compile(nagios_vars['service_descriptors_regex'])
services = list(filter(re.match, all_services))
else:
services = []
return host, services
def query_nagiosAPI(args):
'''
Query Nagios API for a host name and service description
'''
#print(args)
host, service = args
api = "/nagiosxi/api/v1/objects/rrdexport"
url = nagios_vars['host_url'] + api
params = {
"apikey": nagios_vars['api_key'],
"host_name": host,
"service_description": service,
"start": (execution_time - timedelta(minutes=agent_vars['query_interval'])).timestamp(),
"end": execution_time.timestamp(),
"step": agent_vars['sampling_interval'] * 60,
"maxrows": 2**31-1
}
proxies = {}
if len(nagios_vars['http_proxy']) > 0:
proxies['http'] = nagios_vars['http_proxy']
if len(nagios_vars['https_proxy']) > 0:
proxies['https'] = nagios_vars['https_proxy']
attempts = 1
response = requests.get(url, params=params, proxies=proxies, verify=False)
while response.status_code != 200 and attempts < nagios_vars['retries']:
logger.info("Failed to get data for host '{}' and service '{}'. Retrying in {} seconds.".format(
host, service, nagios_vars['sleep_seconds']))
time.sleep(nagios_vars['sleep_seconds'])
response = requests.get(url, params=params, proxies=proxies, verify=False)
attempts += 1
if response.status_code != 200:
logger.warning("Failed to get data for host '{}' and service '{}' in {} attempts.".format(
host, service, nagios_vars['retries']))
logger.info('Response Code: {}\nTEXT: {}'.format(
response.status_code, response.text))
sys.exit(1)
result = response.json()
if "error" in result:
logger.warning("Could not get data for host '{}' and service '{}'. Error message: {}".format(host, service, result['error']))
return pd.DataFrame()
data = [row['v'] for row in result['data']['row']]
index = [int(row['t']) * 1000 for row in result['data']['row']]
columns = result['meta']['legend']['entry']
if type(columns) == list:
columns = [service.replace(' ', '') + '_' + col for col in columns]
else:
columns = [service.replace(' ', '')]
columns= ['{}[{}]'.format(col, host) for col in columns]
df = pd.DataFrame(data, index=index, columns=columns).astype(float).dropna()
return df
def send_performance_data(metric_data):
'''
Send the collected metric data to InsightFinder
'''
logger.info("Sending the performance metrics to InsightFinder.")
data_chunk = []
count = 0
for _, row in metric_data.iterrows():
entry = dict(list(zip(row.index, row)))
data_chunk.append(entry)
count += 1
if len(bytearray(json.dumps(data_chunk), 'utf8')) >= agent_vars['chunk_size']:
logger.debug("Sending a data chunk.")
send_data_chunk(data_chunk)
data_chunk = []
if len(data_chunk) != 0:
logger.debug("Sending last data chunk.")
send_data_chunk(data_chunk)
logger.info("Sent a total of {} metric rows to IF.".format(count))
def send_data_chunk(data_chunk):
'''
Send a single data chunk to IF
'''
start_time = time.time()
url = if_vars['host_url'] + '/customprojectrawdata'
data = {
'metricData': json.dumps(data_chunk),
'licenseKey': if_vars['licenseKey'],
'projectName': if_vars['project_name'],
'userName': if_vars['username'],
'agentType': 'CUSTOM'
}
proxies = {}
if len(if_vars['http_proxy']) > 0:
proxies['http'] = if_vars['http_proxy']
if len(if_vars['https_proxy']) > 0:
proxies['https'] = if_vars['https_proxy']
attempts = 1
response = requests.post(url, data=data, proxies=proxies, verify=False)
while response.status_code != 200 and attempts < if_vars['retries']:
logger.info("Failed to send data. Retrying in {} seconds.".format(
if_vars['sleep_seconds']))
time.sleep(if_vars['sleep_seconds'])
response = requests.post(url, data=data, proxies=proxies, verify=False)
attempts += 1
if response.status_code == 200:
logger.info("Successfully sent {} metric rows in {} seconds.".format(
len(data_chunk), time.time() - start_time))
else:
logger.warning("Failed to send metric data in {} attempts.".format(
if_vars['retries']))
logger.info('Response Code: {}\nTEXT: {}'.format(
response.status_code, response.text))
sys.exit(1)
if __name__ == '__main__':
execution_time = datetime.now(pytz.timezone('UTC'))
logger = set_logger_config()
config_path = 'config.ini'
if_vars, nagios_vars, agent_vars, options = get_config_vars(config_path)
try:
metric_data = collect_performance_data()
if not options.testing:
send_performance_data(metric_data)
except Exception as e:
logger.error(e, exc_info=True)
| [
"logging.getLogger",
"os.path.exists",
"pytz.timezone",
"logging.StreamHandler",
"requests.post",
"configparser.ConfigParser",
"sys.exit",
"json.dumps",
"optparse.OptionParser",
"requests.get",
"time.sleep",
"multiprocessing.pool.ThreadPool",
"time.time",
"pandas.DataFrame",
"datetime.timedelta",
"pandas.concat",
"warnings.filterwarnings",
"regex.compile"
] | [((336, 369), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (359, 369), False, 'import warnings\n'), ((650, 669), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (667, 669), False, 'import logging\n'), ((1081, 1114), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (1102, 1114), False, 'import logging\n'), ((1485, 1499), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (1497, 1499), False, 'from configparser import ConfigParser\n'), ((3336, 3350), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (3348, 3350), False, 'from optparse import OptionParser\n'), ((3903, 3940), 'multiprocessing.pool.ThreadPool', 'ThreadPool', (["agent_vars['thread_pool']"], {}), "(agent_vars['thread_pool'])\n", (3913, 3940), False, 'from multiprocessing.pool import ThreadPool\n'), ((4070, 4107), 'multiprocessing.pool.ThreadPool', 'ThreadPool', (["agent_vars['thread_pool']"], {}), "(agent_vars['thread_pool'])\n", (4080, 4107), False, 'from multiprocessing.pool import ThreadPool\n'), ((4436, 4477), 'pandas.concat', 'pd.concat', (['metric_data'], {'axis': '(1)', 'sort': '(True)'}), '(metric_data, axis=1, sort=True)\n', (4445, 4477), True, 'import pandas as pd\n'), ((5383, 5446), 'requests.get', 'requests.get', (['url'], {'params': 'params', 'proxies': 'proxies', 'verify': '(False)'}), '(url, params=params, proxies=proxies, verify=False)\n', (5395, 5446), False, 'import requests\n'), ((7293, 7356), 'requests.get', 'requests.get', (['url'], {'params': 'params', 'proxies': 'proxies', 'verify': '(False)'}), '(url, params=params, proxies=proxies, verify=False)\n', (7305, 7356), False, 'import requests\n'), ((9655, 9718), 'requests.get', 'requests.get', (['url'], {'params': 'params', 'proxies': 'proxies', 'verify': '(False)'}), '(url, params=params, proxies=proxies, verify=False)\n', (9667, 9718), False, 'import requests\n'), ((12007, 12018), 'time.time', 'time.time', ([], {}), '()\n', (12016, 12018), False, 'import time\n'), ((12532, 12592), 'requests.post', 'requests.post', (['url'], {'data': 'data', 'proxies': 'proxies', 'verify': '(False)'}), '(url, data=data, proxies=proxies, verify=False)\n', (12545, 12592), False, 'import requests\n'), ((1342, 1369), 'os.path.exists', 'os.path.exists', (['config_path'], {}), '(config_path)\n', (1356, 1369), False, 'import os\n'), ((1459, 1470), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1467, 1470), False, 'import sys\n'), ((3875, 3886), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3883, 3886), False, 'import sys\n'), ((4292, 4303), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4300, 4303), False, 'import sys\n'), ((4589, 4600), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4597, 4600), False, 'import sys\n'), ((5655, 5695), 'time.sleep', 'time.sleep', (["nagios_vars['sleep_seconds']"], {}), "(nagios_vars['sleep_seconds'])\n", (5665, 5695), False, 'import time\n'), ((5715, 5778), 'requests.get', 'requests.get', (['url'], {'params': 'params', 'proxies': 'proxies', 'verify': '(False)'}), '(url, params=params, proxies=proxies, verify=False)\n', (5727, 5778), False, 'import requests\n'), ((6073, 6084), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6081, 6084), False, 'import sys\n'), ((6256, 6267), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6264, 6267), False, 'import sys\n'), ((7587, 7627), 'time.sleep', 'time.sleep', (["nagios_vars['sleep_seconds']"], {}), "(nagios_vars['sleep_seconds'])\n", (7597, 7627), False, 'import time\n'), 
((7647, 7710), 'requests.get', 'requests.get', (['url'], {'params': 'params', 'proxies': 'proxies', 'verify': '(False)'}), '(url, params=params, proxies=proxies, verify=False)\n', (7659, 7710), False, 'import requests\n'), ((8027, 8038), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8035, 8038), False, 'import sys\n'), ((9967, 10007), 'time.sleep', 'time.sleep', (["nagios_vars['sleep_seconds']"], {}), "(nagios_vars['sleep_seconds'])\n", (9977, 10007), False, 'import time\n'), ((10027, 10090), 'requests.get', 'requests.get', (['url'], {'params': 'params', 'proxies': 'proxies', 'verify': '(False)'}), '(url, params=params, proxies=proxies, verify=False)\n', (10039, 10090), False, 'import requests\n'), ((10425, 10436), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (10433, 10436), False, 'import sys\n'), ((10642, 10656), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10654, 10656), True, 'import pandas as pd\n'), ((12110, 12132), 'json.dumps', 'json.dumps', (['data_chunk'], {}), '(data_chunk)\n', (12120, 12132), False, 'import json\n'), ((12788, 12824), 'time.sleep', 'time.sleep', (["if_vars['sleep_seconds']"], {}), "(if_vars['sleep_seconds'])\n", (12798, 12824), False, 'import time\n'), ((12844, 12904), 'requests.post', 'requests.post', (['url'], {'data': 'data', 'proxies': 'proxies', 'verify': '(False)'}), '(url, data=data, proxies=proxies, verify=False)\n', (12857, 12904), False, 'import requests\n'), ((13337, 13348), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (13345, 13348), False, 'import sys\n'), ((13411, 13431), 'pytz.timezone', 'pytz.timezone', (['"""UTC"""'], {}), "('UTC')\n", (13424, 13431), False, 'import pytz\n'), ((6601, 6647), 'regex.compile', 'regex.compile', (["nagios_vars['host_names_regex']"], {}), "(nagios_vars['host_names_regex'])\n", (6614, 6647), False, 'import regex\n'), ((8646, 8701), 'regex.compile', 'regex.compile', (["nagios_vars['service_descriptors_regex']"], {}), "(nagios_vars['service_descriptors_regex'])\n", (8659, 8701), False, 'import regex\n'), ((9219, 9266), 'datetime.timedelta', 'timedelta', ([], {'minutes': "agent_vars['query_interval']"}), "(minutes=agent_vars['query_interval'])\n", (9228, 9266), False, 'from datetime import datetime, timedelta\n'), ((11062, 11110), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'index': 'index', 'columns': 'columns'}), '(data, index=index, columns=columns)\n', (11074, 11110), True, 'import pandas as pd\n'), ((11538, 11560), 'json.dumps', 'json.dumps', (['data_chunk'], {}), '(data_chunk)\n', (11548, 11560), False, 'import json\n'), ((13071, 13082), 'time.time', 'time.time', ([], {}), '()\n', (13080, 13082), False, 'import time\n')] |
#!/usr/bin/env python
# coding: utf-8
# ***********************************************************************
#
# V2W-BERT: A Python library for vulnerability classification
# <NAME> (<EMAIL>) : Purdue University
# <NAME> (<EMAIL>): Pacific Northwest National Laboratory
#
# ***********************************************************************
#
#
# Copyright © 2022, Battelle Memorial Institute
# All rights reserved.
#
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#
# 1. Redistributions of source code must retain the above copyright notice, this
#
# list of conditions and the following disclaimer.
#
#
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#
# this list of conditions and the following disclaimer in the documentation
#
# and/or other materials provided with the distribution.
#
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ## Download and Preprocess Latest Dataset
#
# In this script we first download all CVEs to date, then use the NVD and MITRE hierarchy documents to prepare train/validation/test splits.
# ## Import libraries
# In[199]:
import os
import requests, zipfile, io
import pickle
import pandas as pd
import numpy as np
# Here, I have disabled a false alarm that would otherwise trip later in the project.
pd.options.mode.chained_assignment = None
# The datetime library will let me filter the data by reporting date.
from datetime import datetime, timedelta
# Since the NVD data is housed in JavaScript Object Notation (JSON) format, I will need the json_normalize function to access and manipulate the information.
from pandas.io.json import json_normalize
import sys
import torch
import re
from ipynb.fs.full.Dataset import Data
# In[200]:
# Expanding view area to facilitate data manipulation.
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', 100)
# In[201]:
import argparse
from argparse import ArgumentParser
def get_configuration():
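    # Parse command-line options: dataset directory plus the publication-year
    # ranges that define the download window and the train/val/test splits.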
parser = ArgumentParser()
parser.add_argument('--dir', type=str, default='Dataset')
parser.add_argument('--from_year', type=int, default=2020)
parser.add_argument('--to_year', type=int, default=2022)
parser.add_argument('--from_train_year', type=int, default=1990)
parser.add_argument('--to_train_year', type=int, default=2020)
parser.add_argument('--from_test_year', type=int, default=2021)
parser.add_argument('--to_test_year', type=int, default=2021)
parser.add_argument('--from_val_year', type=int, default=2022)
parser.add_argument('--to_val_year', type=int, default=2022)
parser.add_argument('-f') ##dummy for jupyternotebook
args = parser.parse_args()
dict_args = vars(args)
return args, dict_args
args, dict_args=get_configuration()
print(dict_args)
print(args.dir)
# In[ ]:
# ### Configuration
# In[202]:
class DataPath():
def __init__(self, args, dataset_dir='',results_dir=''):
#File locations
self.PATH_TO_DATASETS_DIRECTORY = dataset_dir+'/NVD/raw/'
self.PATH_TO_RESULTS_DIRECTORY = results_dir+'/NVD/processed/'
self.NVD_CVE_FILE=self.PATH_TO_RESULTS_DIRECTORY+'NVD_CVE_data.csv'
self.Graph_FILE=self.PATH_TO_RESULTS_DIRECTORY+'GRAPH_data'
self.GRAPHVIZ_HIERARCHY=self.PATH_TO_RESULTS_DIRECTORY+'Hierarchy'
self.MITRE_CWE_FILE=self.PATH_TO_DATASETS_DIRECTORY+'CWE_RC_1000.csv'
self.NVD_CWE_FILE=self.PATH_TO_RESULTS_DIRECTORY+'NVD_CWE_data.csv'
self.MASK_FILE = self.PATH_TO_RESULTS_DIRECTORY+'NVD_data'
self.MERGED_NVD_CVE_FILE=self.PATH_TO_RESULTS_DIRECTORY+'NVD_CVE.csv'
self.FILTERED_NVD_CWE_FILE=self.PATH_TO_RESULTS_DIRECTORY+'NVD_CWE.csv'
self.YEARS=list(range(args.from_year,args.to_year+1))
self.TRAIN_YEARS=list(range(args.from_train_year,args.to_train_year+1))
self.VAL_YEARS=list(range(args.from_val_year,args.to_val_year+1))
self.TEST_YEARS=list(range(args.from_test_year,args.to_test_year+1))
if not os.path.exists(self.PATH_TO_DATASETS_DIRECTORY):
print("Creating directory: ",self.PATH_TO_DATASETS_DIRECTORY)
os.makedirs(self.PATH_TO_DATASETS_DIRECTORY)
if not os.path.exists(self.PATH_TO_RESULTS_DIRECTORY):
print("Creating directory: ",self.PATH_TO_RESULTS_DIRECTORY)
os.makedirs(self.PATH_TO_RESULTS_DIRECTORY)
class Config(DataPath):
def __init__(self,args, dataset_dir='',results_dir=''):
super(Config, self).__init__(args, dataset_dir, results_dir)
self.CLUSTER_LABEL=0
self.download()
def download(self):
for year in self.YEARS:
if not os.path.exists(self.PATH_TO_DATASETS_DIRECTORY+'nvdcve-1.1-'+str(year)+'.json'):
url = 'https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-'+str(year)+'.json.zip'
print("Downloading: ",url)
r = requests.get(url)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(self.PATH_TO_DATASETS_DIRECTORY)
print("CVEs downloaded")
if not os.path.exists(self.MITRE_CWE_FILE):
url = 'https://drive.google.com/uc?export=download&id=1-phSamb4RbxyoBc3AQ2xxKMSsK2DwPyn'
print("Downloading: ",url)
r = requests.get(url)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(self.PATH_TO_DATASETS_DIRECTORY)
print("CWEs downloaded")
config=Config(args,dataset_dir=args.dir,results_dir=args.dir)
# ### ProcessCVEs
# In[203]:
def getDataFrame(config):
df = []
counter=0
for year in config.YEARS:
yearly_data = pd.read_json(config.PATH_TO_DATASETS_DIRECTORY+'nvdcve-1.1-'+str(year)+'.json')
if counter == 0:
df = yearly_data
else:
df = df.append(yearly_data)
counter+=1
return df
# In[204]:
def removeREJECT(description):
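    # Build a boolean mask that is False for CVEs whose description contains
    # "REJECT" (withdrawn entries) or cannot be parsed, so they can be dropped.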
series=[]
for x in description:
try:
if "REJECT" in (json_normalize(x)["value"])[0]:
series.append(False)
else:
series.append(True)
except:
series.append(False)
return pd.Series(series,index=description.index)
# In[205]:
def removeUnknownCWE(description):
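    # Boolean mask that is False for rows whose CWE label is "UNKNOWN" or "NONE".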
series=[]
for x in description:
try:
if x == "UNKNOWN" or x == "NONE":
series.append(False)
else:
series.append(True)
except:
series.append(False)
return pd.Series(series,index=description.index)
# In[206]:
def getCVEDescription(df):
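    # Flatten the nested description JSON into a plain-text Series aligned with
    # the CVE index; unparsable entries become "NONE".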
CVE_entry = []
CVE_index = df["cve.description.description_data"].index
for x in df["cve.description.description_data"]:
try:
raw_CVE_entry = json_normalize(x)["value"][0]
clean_CVE_entry = str(raw_CVE_entry)
CVE_entry.append(clean_CVE_entry)
except:
CVE_entry.append("NONE")
CVE_entry = pd.Series(CVE_entry, index = CVE_index)
return CVE_entry
# In[207]:
# Defining a function which I will use below
def consolidate_unknowns(x):
if x == "NVD-CWE-Other" or x == "NVD-CWE-noinfo":
return "UNKNOWN"
else:
return x
# In[208]:
def getCWEs(df):
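    # Collect the list of CWE ids assigned to each CVE from the nested
    # problemtype JSON; CVEs without a usable CWE get ['NONE'].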
CWE_entry = []
CWE_index = df["cve.problemtype.problemtype_data"].index
for x in df["cve.problemtype.problemtype_data"]:
try:
CWE_normalized_json_step_1 = json_normalize(x)
CWE_normalized_json_step_2 = CWE_normalized_json_step_1["description"][0]
CWEs=[]
#print(json_normalize(CWE_normalized_json_step_2)["value"])
for CWE in json_normalize(CWE_normalized_json_step_2)["value"]:
#CWEs.append(consolidate_unknowns(str(CWE)))
CWEs.append(str(CWE))
CWE_entry.append(CWEs)
except:
CWE_entry.append(['NONE'])
CWE_entry = pd.Series(CWE_entry, index = CWE_index)
return CWE_entry
# In[209]:
def ProcessDataset(config):
print("Loading data from file---")
df=getDataFrame(config)
CVE_Items = json_normalize(df["CVE_Items"])
df = pd.concat([df.reset_index(), CVE_Items], axis=1)
df = df.drop(["index", "CVE_Items"], axis=1)
df = df.rename(columns={"cve.CVE_data_meta.ID": "CVE ID"})
CVE_ID = df["CVE ID"]
df.drop(labels=["CVE ID"], axis=1,inplace = True)
df.insert(0, "CVE ID", CVE_ID)
##remove description with REJECT
print("Removing REJECTs---")
df=df[removeREJECT(df["cve.description.description_data"])]
##Extract CVE description
CVE_description=getCVEDescription(df)
df.insert(1, "CVE Description", CVE_description)
##Extract CWEs
print("Extracting CWEs---")
CWE_entry=getCWEs(df)
df.insert(2, "CWE Code", CWE_entry)
# ##Remove CWEs we don't know true label
# print("Removing Unknown CWEs---")
# df=df[removeUnknownCWE(df["CWE Code 1"])]
# Converting the data to pandas date-time format
df["publishedDate"] = pd.to_datetime(df["publishedDate"])
return df
# ### ProcessCWEs
# In[210]:
def processAndSaveCVE(config, LOAD_SAVED=True):
if not os.path.exists(config.NVD_CVE_FILE) or LOAD_SAVED==False:
df=ProcessDataset(config)
df=df[['publishedDate', 'CVE ID', 'CVE Description', 'CWE Code']]
df.to_csv(config.NVD_CVE_FILE,index=False)
else:
df=pd.read_csv(config.NVD_CVE_FILE)
return df
# In[211]:
def ProcessCWE_NVD(config):
# Importing BeautifulSoup and an xml parser to scrape the CWE definitions from the NVD web site
from bs4 import BeautifulSoup
import lxml.etree
# loading the NVD CWE Definitions page and scraping it for the first table that appears
NVD_CWE_description_url = requests.get("https://nvd.nist.gov/vuln/categories")
CWE_definitions_page_soup = BeautifulSoup(NVD_CWE_description_url.content, "html.parser")
table = CWE_definitions_page_soup.find_all('table')[0]
df_CWE_definitions = pd.read_html(str(table))[0]
return df_CWE_definitions
# In[212]:
def ProcessCWE_MITRE(config):
print('Loading CWE file : {0}'.format(config.MITRE_CWE_FILE))
#df_CWE_definitions = pd.read_csv(config.MITRE_CWE_FILE, quotechar='"',delimiter=',', encoding='latin1',index_col=False)
df_CWE_definitions = pd.read_csv(config.MITRE_CWE_FILE, delimiter=',', encoding='latin1',index_col=False)
return df_CWE_definitions
# In[213]:
def processAndSaveCWE(config, LOAD_SAVED=True):
if not os.path.exists(config.MITRE_CWE_FILE) or LOAD_SAVED==False:
df_CWE_MITRE=ProcessCWE_MITRE(config)
df_CWE_MITRE.to_csv(config.MITRE_CWE_FILE,index=False)
else:
df_CWE_MITRE=pd.read_csv(config.MITRE_CWE_FILE, index_col=False)
if not os.path.exists(config.NVD_CWE_FILE) or LOAD_SAVED==False:
df_CWE_NVD=ProcessCWE_NVD(config)
df_CWE_NVD.to_csv(config.NVD_CWE_FILE,index=False)
else:
df_CWE_NVD=pd.read_csv(config.NVD_CWE_FILE,index_col=False)
return df_CWE_MITRE, df_CWE_NVD
# In[214]:
#df_CWE_MITRE, df_CWE_NVD = processAndSaveCWE(config, True)
# In[215]:
#df_CWE_MITRE
#df_CWE_NVD
# In[216]:
def load_preprocessed(config, LOAD_SAVED=True):
df_CVE=processAndSaveCVE(config, LOAD_SAVED)
df_CWE_MITRE, df_CWE_NVD = processAndSaveCWE(config, LOAD_SAVED=True)
index1= np.argwhere(df_CWE_NVD['Name'].values == 'NVD-CWE-Other')[0][0]
index2= np.argwhere(df_CWE_NVD['Name'].values == 'NVD-CWE-noinfo')[0][0]
df_CWE_NVD.drop(index=[index1,index2], inplace = True)
return df_CVE, df_CWE_NVD, df_CWE_MITRE
# In[217]:
#load_preprocessed(config, LOAD_SAVED=False)
# ### Create Training and Test Dataset
# In[218]:
def getMask(config,df_CVE,df_CWE):
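    # Build the multi-label target matrix y (one row per CVE, one column per CWE)
    # and assign every CVE to the train/val/test mask based on its published date.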
n = len(df_CWE)
m = len(df_CVE)
#get date range
train_start_date = pd.to_datetime(str(config.TRAIN_YEARS[0])+'-01-01').tz_localize('US/Eastern')
train_end_date = pd.to_datetime(str(config.TRAIN_YEARS[-1])+'-01-01').tz_localize('US/Eastern') + timedelta(days=365)
val_start_date = pd.to_datetime(str(config.VAL_YEARS[0])+'-01-01').tz_localize('US/Eastern')
val_end_date = pd.to_datetime(str(config.VAL_YEARS[-1])+'-01-01').tz_localize('US/Eastern') + timedelta(days=365)
test_start_date = pd.to_datetime(str(config.TEST_YEARS[0])+'-01-01').tz_localize('US/Eastern')
test_end_date = pd.to_datetime(str(config.TEST_YEARS[-1])+'-01-01').tz_localize('US/Eastern') + timedelta(days=365)
cwe_ids=df_CWE['Name']
cwe_map=dict(zip(cwe_ids, list(range(n))))
index_cwe_map = dict(zip(list(range(n)),cwe_ids))
#creating y and finding labeled
y=torch.zeros((m,n),dtype=torch.long)
labeled_mask= torch.zeros(m, dtype=torch.bool)
train_index = torch.zeros(m, dtype=torch.bool)
test_index = torch.zeros(m, dtype=torch.bool)
val_index = torch.zeros(m, dtype=torch.bool)
CWEs=df_CVE['CWE Code']
Dates=df_CVE['publishedDate']
for i,row in enumerate(zip(CWEs,Dates)):
cwes=row[0]
date=row[1]
if(type(cwes) == str):
cwes=[cwe for cwe in cwes.strip('[]').split("'") if not (cwe==',' or cwe==', ' or cwe=='''''')]
if(type(date) == str):
date=pd.to_datetime(date)
for cwe in cwes:
if cwe in cwe_map:
y[i][cwe_map[cwe]]=1
if torch.sum(y[i])>0:
labeled_mask[i]=True
if(train_start_date<date and date<train_end_date):
train_index[i]=True
elif(val_start_date<date and date<val_end_date):
val_index[i]=True
elif(test_start_date<date and date<test_end_date):
test_index[i]=True
else:
print(date,'-> not covered')
##convert to tensors
data=Data(train_mask=train_index, val_mask=val_index, test_mask=test_index, y=y, num_nodes=m)
return data
# In[219]:
def getPercent(data,df_CVE,df_CWE, max_data_inaclass=500):
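    # Cap the training mask at `max_data_inaclass` randomly chosen CVEs per CWE
    # class to limit class imbalance.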
CWEs=df_CVE['CWE Code']
train_mask= (data.train_mask == True).nonzero().flatten().numpy()
CWEs_train={}
for key in train_mask:
cwes=CWEs[key]
if(type(cwes) == str):
cwes=[cwe.strip() for cwe in cwes.strip('[]').split("'") if not (cwe==',' or cwe==', ' or cwe=='''''')]
for cwe in cwes:
if cwe in CWEs_train:
CWEs_train[cwe].append(key)
else:
CWEs_train[cwe]=[key]
required_train_mask = torch.zeros(len(data.train_mask), dtype=torch.bool)
for key, values in CWEs_train.items():
if(len(values)<max_data_inaclass):
required_train_mask[values]=True
else:
np.random.shuffle(values)
takeamnt=max_data_inaclass
required_train_mask[values[:takeamnt]]=True
data.train_mask=required_train_mask
return data
# In[ ]:
# In[220]:
from collections import OrderedDict
def CWE_description(row):
return str(row['Name'])+" "+str(row['Description'])+" "+str(row['Extended Description'])+" "+str(row['Common Consequences'])
def CWE_description_NVD(row,df_CWE_Mitre):
cwe=row['Name']
cwe_id = int(re.findall("\d+", cwe)[0])
description = df_CWE_Mitre[df_CWE_Mitre['CWE-ID'].values==cwe_id]['CVE Description'].values
if len(description)>0:
return description[0]
else:
return ''
def UpdateData(data, df_CVE, df_CWE_NVD, df_CWE_MITRE):
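    # Append one pseudo-document per CWE class (built from its MITRE description)
    # to the CVE table, and extend y plus the masks with a matching class_mask.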
df_CWE_MITRE['CVE Description']= df_CWE_MITRE.apply(lambda row: CWE_description(row), axis=1)
for i, row in df_CWE_NVD.iterrows():
description=CWE_description_NVD(row, df_CWE_MITRE)
#df_CWE_NVD.set_value(i,'CVE Description',description)
df_CWE_NVD.at[i,'CVE Description']=description
df_CWE_NVD['CWE Code']= df_CWE_NVD.apply(lambda row: [str(row['Name'])], axis=1)
df_CWE_NVD=df_CWE_NVD[['CVE Description','CWE Code','Name']]
df_CVE_updated = pd.concat([df_CVE,df_CWE_NVD],ignore_index=True, sort=False)
n = len(df_CWE_NVD)
cwe_ids=df_CWE_NVD['Name']
cwe_map=dict(zip(cwe_ids, list(range(n))))
index_cwe_map = dict(zip(list(range(n)),cwe_ids))
class_labels=torch.zeros((n,n),dtype=torch.long)
CWElist=df_CWE_NVD['Name'].tolist()
for i,cwe in enumerate(CWElist):
cwe_value=cwe
class_labels[i][cwe_map[cwe_value]]=1
data.y=torch.cat((data.y,class_labels),dim=0)
class_mask=torch.cat((torch.zeros(len(data.train_mask),dtype=bool),torch.ones(len(class_labels),dtype=bool)),dim=0)
data.class_mask=class_mask
data.train_mask=torch.cat((data.train_mask,torch.zeros(len(class_labels),dtype=bool)),dim=0)
data.val_mask=torch.cat((data.val_mask,torch.zeros(len(class_labels),dtype=bool)),dim=0)
data.test_mask=torch.cat((data.test_mask,torch.zeros(len(class_labels),dtype=bool)),dim=0)
return data, df_CVE_updated, df_CWE_NVD
# ### CWE hierarchy
# In[221]:
def cwe_child(row):
item = str(row['Related Weaknesses'])
cve_p = re.compile('ChildOf:CWE ID:\\d+')
results = cve_p.findall(item)
item=''.join(results)
cve_p = re.compile('\\d+')
results = cve_p.findall(item)
results = list(map(int, results))
results=list(OrderedDict.fromkeys(results)) #preserve order
#results=list(set(results)) #order not preserve
# print(str(row['CWE-ID'])+'->', end="")
# print(results)
if(len(results)>0):
return results
else:
return [-1]
def depth_node(child_parent,node):
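    # Recursively follow ChildOf links to compute every depth at which a CWE
    # occurs in the hierarchy (nodes without a parent get depth 0).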
if -1 in child_parent[node]:
return [0]
depths=[]
for parent_node in child_parent[node]:
parent_depth=depth_node(child_parent, parent_node)
depths.extend([x+1 for x in parent_depth])
return depths
def create_group(nodes,parents):
child_parent=dict(zip(nodes,parents))
depth={}
for node, level in child_parent.items():
depth[node]=depth_node(child_parent, node)
return child_parent, depth
def save_hierarchy(config, nodes, names, child_parent):
from graphviz import Digraph,Graph
#save hierarchy graph
dot = Digraph(comment='NVD Research Concepts Hierarchy',engine='dot',node_attr={'shape': 'box'})
dot.graph_attr['rankdir'] = 'LR'
#dot=Graph(format='png')
root=1003
dot.node(str(root), "CWE-ID " + str(root) + ":" + 'Root Node')
for i in range(len(nodes)):
dot.node(str(nodes[i]), "CWE-ID " + str(nodes[i]) + ":" + names[i])
for cwe in nodes:
parents=child_parent[cwe]
if(parents[0]==-1):
dot.edge(str(cwe),str(root))
continue
for p in parents:
dot.edge(str(cwe),str(p))
#print(dot.source)
dot.format='pdf'
dot.render(config.GRAPHVIZ_HIERARCHY, view=False)
def cluster_cwes(config, cwe_c,df_CWE_NVD):
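    # Keep only CWEs that NVD actually uses, rebuild the ChildOf parent lists
    # within that subset, and return the child->parent map and node depths.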
valid_cwes=[]
for cwe in df_CWE_NVD['Name']:
if cwe in ['NVD-CWE-Other','NVD-CWE-noinfo']: continue
cwe_id = int(re.findall("\d+", cwe)[0])
valid_cwes.append(cwe_id)
delete_indexs=[]
for i, row in cwe_c.iterrows():
cwe=int(row['CWE-ID'])
if(cwe not in valid_cwes):
delete_indexs.append(i)
cwe_c.drop(delete_indexs,inplace=True)
parent_columns=[]
for i, row in cwe_c.iterrows():
parents=cwe_child(row)
valid_parents=[]
for x in parents:
if x in valid_cwes:
valid_parents.append(x)
if(len(valid_parents)==0):
valid_parents.append(-1)
parent_columns.append(valid_parents)
cwe_c['parent']=parent_columns
nodes=cwe_c['CWE-ID'].tolist()
parents=cwe_c['parent'].tolist()
child_parent, depth = create_group(nodes, parents)
save_hierarchy(config, nodes, cwe_c['Name'].tolist(),child_parent)
return child_parent, depth
# df_CWE_MITRE, df_CWE_NVD = processAndSaveCWE(config, LOAD_SAVED=True)
# child_parent, depth = cluster_cwes(config, df_CWE_MITRE,df_CWE_NVD)
# In[222]:
def set_heirarchy(data,df_CWE_NVD):
from ipynb.fs.full.NVD_cwe_hierarchy import get_nvd_hierarchy
org_child_parent, org_parent_child, org_depth = get_nvd_hierarchy()
data.org_child_parent=org_child_parent
data.org_parent_child=org_parent_child
data.org_depth=org_depth
n = len(df_CWE_NVD)
cwe_ids=df_CWE_NVD['Name'].tolist()
if 'NVD-CWE-noinfo' in cwe_ids: cwe_ids.remove('NVD-CWE-noinfo')
if 'NVD-CWE-Other' in cwe_ids: cwe_ids.remove('NVD-CWE-Other')
cwe_ids=[int(re.findall("\d+", cwe)[0]) for cwe in cwe_ids]
cwe_map=dict(zip(cwe_ids, list(range(n))))
index_cwe_map = dict(zip(list(range(n)),cwe_ids))
child_parent={}
for c,p in org_child_parent.items():
if -1 in p:
child_parent[cwe_map[c]]=[-1 for px in p]
else:
child_parent[cwe_map[c]]=[cwe_map[px] for px in p]
parent_child={}
for p,c in org_parent_child.items():
if p==-1:
parent_child[-1]=[cwe_map[cx] for cx in c]
else:
parent_child[cwe_map[p]]=[cwe_map[cx] for cx in c]
depth={}
for i,d in org_depth.items():
depth[cwe_map[i]]=d
data.child_parent=child_parent
data.parent_child=parent_child
data.depth=depth
return data
# df_CWE_MITRE, df_CWE_NVD = processAndSaveCWE(config, LOAD_SAVED=True)
# set_heirarchy(Data(),df_CWE_NVD)
# In[223]:
def set_mitre_heirarchy(config, data, df_CWE_MITRE, df_CWE_NVD):
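    # Build the hierarchy from the MITRE CWE table, then remap it onto contiguous
    # class indices and attach child/parent/depth lookups to the data object.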
org_child_parent, org_depth = cluster_cwes(config, df_CWE_MITRE,df_CWE_NVD)
org_parent_child={}
for c,p in org_child_parent.items():
for px in p:
if px in org_parent_child:
org_parent_child[px].append(c)
else:
org_parent_child[px]=[c]
data.org_child_parent=org_child_parent
data.org_parent_child=org_parent_child
data.org_depth=org_depth
n = len(df_CWE_NVD)
cwe_ids=df_CWE_NVD['Name'].tolist()
if 'NVD-CWE-noinfo' in cwe_ids: cwe_ids.remove('NVD-CWE-noinfo')
if 'NVD-CWE-Other' in cwe_ids: cwe_ids.remove('NVD-CWE-Other')
cwe_ids=[int(re.findall("\d+", cwe)[0]) for cwe in cwe_ids]
cwe_map=dict(zip(cwe_ids, list(range(n))))
index_cwe_map = dict(zip(list(range(n)),cwe_ids))
child_parent={}
for c,p in org_child_parent.items():
if -1 in p:
child_parent[cwe_map[c]]=[-1 for px in p]
else:
child_parent[cwe_map[c]]=[cwe_map[px] for px in p]
parent_child={}
for p,c in org_parent_child.items():
if p==-1:
parent_child[-1]=[cwe_map[cx] for cx in c]
else:
parent_child[cwe_map[p]]=[cwe_map[cx] for cx in c]
depth={}
for i,d in org_depth.items():
depth[cwe_map[i]]=d
data.child_parent=child_parent
data.parent_child=parent_child
data.depth=depth
return data
# if os.uname()[0].find('Darwin')==-1: ##if not darwin(mac/locallaptop)
# DIR='/scratch/gilbreth/das90/Dataset'
# else:
# DIR='/Users/siddharthashankardas/Purdue/Dataset'
# config=Config(dataset_dir=DIR,results_dir=DIR)
# df_CWE_MITRE, df_CWE_NVD = processAndSaveCWE(config, LOAD_SAVED=True)
# set_mitre_heirarchy(config, Data(),df_CWE_MITRE, df_CWE_NVD)
# ### Dataset function
# In[224]:
def get_labeled_only(config,df_CVE,df_CWE):
n = len(df_CWE)
m = len(df_CVE)
cwe_ids=df_CWE['Name']
cwe_map=dict(zip(cwe_ids, list(range(n))))
labeled_mask= torch.zeros(m, dtype=torch.bool)
CWEs=df_CVE['CWE Code']
Dates=df_CVE['publishedDate']
for i,row in enumerate(zip(CWEs,Dates)):
cwes=row[0]
date=row[1]
if(type(cwes) == str):
cwes=[cwe for cwe in cwes.strip('[]').split("'") if not (cwe==',' or cwe==', ' or cwe=='''''')]
for cwe in cwes:
if cwe in cwe_map:
labeled_mask[i]=True
break
print(sum(labeled_mask))
labeled_indexs= (labeled_mask == True).nonzero().flatten().numpy()
df_CVE=df_CVE.iloc[labeled_indexs,:]
return df_CVE
def take_subset(config,df_CVE,df_CWE_NVD,take=100):
df_CVE=get_labeled_only(config,df_CVE,df_CWE_NVD)
df_CVE=df_CVE.sample(n = take, replace = False)
return df_CVE
# In[225]:
def ExtractFeature(config, max_data_inaclass=-1, RECOMPUTE=False, LOAD_SAVED=True, take=-1, hierarchy=''):
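    # Top-level entry point: build (or reload from disk) the processed CVE/CWE
    # tables, label matrix, train/val/test masks and the CWE hierarchy.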
save_filename=config.MASK_FILE
nvd_filename=config.MERGED_NVD_CVE_FILE
cwe_filename=config.FILTERED_NVD_CWE_FILE
if RECOMPUTE==True or not os.path.exists(save_filename) or not os.path.exists(nvd_filename) or not os.path.exists(cwe_filename):
df_CVE, df_CWE_NVD, df_CWE_MITRE =load_preprocessed(config, LOAD_SAVED)
if take>-1:
print('Selecting a subset')
df_CVE=take_subset(config,df_CVE,df_CWE_NVD,take)
print('Done...')
data=getMask(config,df_CVE,df_CWE_NVD)
if(max_data_inaclass!=-1):
data=getPercent(data,df_CVE,df_CWE_NVD,max_data_inaclass)
data, df_CVE_merged, df_CWE = UpdateData(data, df_CVE, df_CWE_NVD, df_CWE_MITRE)
if hierarchy=='nvd':
print('Using nvd hierarchy')
data=set_heirarchy(data,df_CWE_NVD)
else:
print('using mitre hierarchy')
data=set_mitre_heirarchy(config, data, df_CWE_MITRE, df_CWE_NVD)
pickle.dump(data,open(save_filename, "wb" ))
df_CVE_merged.to_csv(nvd_filename,index=False)
df_CWE.to_csv(cwe_filename,index=False)
else:
data= pickle.load(open(save_filename, "rb" ))
df_CVE_merged=pd.read_csv(nvd_filename,low_memory=False)
df_CWE=pd.read_csv(cwe_filename,low_memory=False)
return data, df_CVE_merged, df_CWE
# In[226]:
# df_CVE, df_CWE_NVD, df_CWE_MITRE = load_preprocessed(config, True)
# data=getMask(config,df_CVE,df_CWE_NVD)
# data, df_CVE_merged, df_CWE = UpdateData(data, df_CVE, df_CWE_NVD, df_CWE_MITRE)
# ## Scratch
# In[227]:
# if os.uname()[0].find('Darwin')==-1: ##if not darwin(mac/locallaptop)
# DIR='/scratch/gilbreth/das90/Dataset'
# else:
# DIR='/Users/siddharthashankardas/Purdue/Dataset'
#config=Config(dataset_dir=DIR,results_dir=DIR)
data, df_CVE, df_CWE=ExtractFeature(config,max_data_inaclass=-1, RECOMPUTE=True, LOAD_SAVED=False, take=-1, hierarchy='mitre')
# In[228]:
print("Train size:", sum(data.train_mask))
print("Val size:", sum(data.val_mask))
print("Test size:", sum(data.test_mask))
print("Class size:",sum(data.class_mask))
# In[229]:
print(len(data.train_mask))
# In[230]:
# df_CVE, df_CWE_NVD, df_CWE_MITRE =load_preprocessed(config, LOAD_SAVED=True)
# take_subset(config,df_CVE,df_CWE_NVD, take=10000)
# ## Main
# In[231]:
# if __name__ == '__main__':
# if os.uname()[0].find('Darwin')==-1: ##if not darwin(mac/locallaptop)
# DIR='/scratch/gilbreth/das90/Dataset'
# else:
# DIR='/Users/siddharthashankardas/Purdue/Dataset'
# config=Config(dataset_dir=DIR,results_dir=DIR)
# data, df_CVE, df_CWE = ExtractFeature(config,max_data_inaclass=-1, RECOMPUTE=True, LOAD_SAVED=True)
# print("Train size:", sum(data.train_mask))
# print("Val size:", sum(data.val_mask))
# print("Test size:", sum(data.test_mask))
# print("Class size:",sum(data.class_mask))
# print("Total: ",sum(data.train_mask)+sum(data.val_mask)+sum(data.test_mask))
# In[ ]:
| [
"pandas.read_csv",
"re.compile",
"io.BytesIO",
"torch.sum",
"datetime.timedelta",
"pandas.to_datetime",
"os.path.exists",
"ipynb.fs.full.Dataset.Data",
"collections.OrderedDict.fromkeys",
"argparse.ArgumentParser",
"pandas.set_option",
"requests.get",
"graphviz.Digraph",
"re.findall",
"torch.cat",
"pandas.Series",
"os.makedirs",
"bs4.BeautifulSoup",
"numpy.argwhere",
"ipynb.fs.full.NVD_cwe_hierarchy.get_nvd_hierarchy",
"pandas.concat",
"torch.zeros",
"pandas.io.json.json_normalize",
"numpy.random.shuffle"
] | [((2725, 2762), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(20)'], {}), "('display.max_rows', 20)\n", (2738, 2762), True, 'import pandas as pd\n'), ((2763, 2804), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(100)'], {}), "('display.max_columns', 100)\n", (2776, 2804), True, 'import pandas as pd\n'), ((2911, 2927), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (2925, 2927), False, 'from argparse import ArgumentParser\n'), ((7363, 7405), 'pandas.Series', 'pd.Series', (['series'], {'index': 'description.index'}), '(series, index=description.index)\n', (7372, 7405), True, 'import pandas as pd\n'), ((7734, 7776), 'pandas.Series', 'pd.Series', (['series'], {'index': 'description.index'}), '(series, index=description.index)\n', (7743, 7776), True, 'import pandas as pd\n'), ((8187, 8224), 'pandas.Series', 'pd.Series', (['CVE_entry'], {'index': 'CVE_index'}), '(CVE_entry, index=CVE_index)\n', (8196, 8224), True, 'import pandas as pd\n'), ((9146, 9183), 'pandas.Series', 'pd.Series', (['CWE_entry'], {'index': 'CWE_index'}), '(CWE_entry, index=CWE_index)\n', (9155, 9183), True, 'import pandas as pd\n'), ((9339, 9370), 'pandas.io.json.json_normalize', 'json_normalize', (["df['CVE_Items']"], {}), "(df['CVE_Items'])\n", (9353, 9370), False, 'from pandas.io.json import json_normalize\n'), ((10278, 10313), 'pandas.to_datetime', 'pd.to_datetime', (["df['publishedDate']"], {}), "(df['publishedDate'])\n", (10292, 10313), True, 'import pandas as pd\n'), ((11075, 11127), 'requests.get', 'requests.get', (['"""https://nvd.nist.gov/vuln/categories"""'], {}), "('https://nvd.nist.gov/vuln/categories')\n", (11087, 11127), False, 'import requests, zipfile, io\n'), ((11161, 11222), 'bs4.BeautifulSoup', 'BeautifulSoup', (['NVD_CWE_description_url.content', '"""html.parser"""'], {}), "(NVD_CWE_description_url.content, 'html.parser')\n", (11174, 11222), False, 'from bs4 import BeautifulSoup\n'), ((11641, 11730), 'pandas.read_csv', 'pd.read_csv', (['config.MITRE_CWE_FILE'], {'delimiter': '""","""', 'encoding': '"""latin1"""', 'index_col': '(False)'}), "(config.MITRE_CWE_FILE, delimiter=',', encoding='latin1',\n index_col=False)\n", (11652, 11730), True, 'import pandas as pd\n'), ((14048, 14085), 'torch.zeros', 'torch.zeros', (['(m, n)'], {'dtype': 'torch.long'}), '((m, n), dtype=torch.long)\n', (14059, 14085), False, 'import torch\n'), ((14106, 14138), 'torch.zeros', 'torch.zeros', (['m'], {'dtype': 'torch.bool'}), '(m, dtype=torch.bool)\n', (14117, 14138), False, 'import torch\n'), ((14157, 14189), 'torch.zeros', 'torch.zeros', (['m'], {'dtype': 'torch.bool'}), '(m, dtype=torch.bool)\n', (14168, 14189), False, 'import torch\n'), ((14207, 14239), 'torch.zeros', 'torch.zeros', (['m'], {'dtype': 'torch.bool'}), '(m, dtype=torch.bool)\n', (14218, 14239), False, 'import torch\n'), ((14256, 14288), 'torch.zeros', 'torch.zeros', (['m'], {'dtype': 'torch.bool'}), '(m, dtype=torch.bool)\n', (14267, 14288), False, 'import torch\n'), ((15345, 15437), 'ipynb.fs.full.Dataset.Data', 'Data', ([], {'train_mask': 'train_index', 'val_mask': 'val_index', 'test_mask': 'test_index', 'y': 'y', 'num_nodes': 'm'}), '(train_mask=train_index, val_mask=val_index, test_mask=test_index, y=y,\n num_nodes=m)\n', (15349, 15437), False, 'from ipynb.fs.full.Dataset import Data\n'), ((17593, 17655), 'pandas.concat', 'pd.concat', (['[df_CVE, df_CWE_NVD]'], {'ignore_index': '(True)', 'sort': '(False)'}), '([df_CVE, df_CWE_NVD], ignore_index=True, sort=False)\n', (17602, 17655), True, 
'import pandas as pd\n'), ((17835, 17872), 'torch.zeros', 'torch.zeros', (['(n, n)'], {'dtype': 'torch.long'}), '((n, n), dtype=torch.long)\n', (17846, 17872), False, 'import torch\n'), ((18037, 18077), 'torch.cat', 'torch.cat', (['(data.y, class_labels)'], {'dim': '(0)'}), '((data.y, class_labels), dim=0)\n', (18046, 18077), False, 'import torch\n'), ((18669, 18702), 're.compile', 're.compile', (['"""ChildOf:CWE ID:\\\\d+"""'], {}), "('ChildOf:CWE ID:\\\\d+')\n", (18679, 18702), False, 'import re\n'), ((18775, 18793), 're.compile', 're.compile', (['"""\\\\d+"""'], {}), "('\\\\d+')\n", (18785, 18793), False, 'import re\n'), ((19814, 19911), 'graphviz.Digraph', 'Digraph', ([], {'comment': '"""NVD Research Concepts Hierarchy"""', 'engine': '"""dot"""', 'node_attr': "{'shape': 'box'}"}), "(comment='NVD Research Concepts Hierarchy', engine='dot', node_attr=\n {'shape': 'box'})\n", (19821, 19911), False, 'from graphviz import Digraph, Graph\n'), ((21917, 21936), 'ipynb.fs.full.NVD_cwe_hierarchy.get_nvd_hierarchy', 'get_nvd_hierarchy', ([], {}), '()\n', (21934, 21936), False, 'from ipynb.fs.full.NVD_cwe_hierarchy import get_nvd_hierarchy\n'), ((25381, 25413), 'torch.zeros', 'torch.zeros', (['m'], {'dtype': 'torch.bool'}), '(m, dtype=torch.bool)\n', (25392, 25413), False, 'import torch\n'), ((10689, 10721), 'pandas.read_csv', 'pd.read_csv', (['config.NVD_CVE_FILE'], {}), '(config.NVD_CVE_FILE)\n', (10700, 10721), True, 'import pandas as pd\n'), ((12048, 12099), 'pandas.read_csv', 'pd.read_csv', (['config.MITRE_CWE_FILE'], {'index_col': '(False)'}), '(config.MITRE_CWE_FILE, index_col=False)\n', (12059, 12099), True, 'import pandas as pd\n'), ((12317, 12366), 'pandas.read_csv', 'pd.read_csv', (['config.NVD_CWE_FILE'], {'index_col': '(False)'}), '(config.NVD_CWE_FILE, index_col=False)\n', (12328, 12366), True, 'import pandas as pd\n'), ((13413, 13432), 'datetime.timedelta', 'timedelta', ([], {'days': '(365)'}), '(days=365)\n', (13422, 13432), False, 'from datetime import datetime, timedelta\n'), ((13628, 13647), 'datetime.timedelta', 'timedelta', ([], {'days': '(365)'}), '(days=365)\n', (13637, 13647), False, 'from datetime import datetime, timedelta\n'), ((13847, 13866), 'datetime.timedelta', 'timedelta', ([], {'days': '(365)'}), '(days=365)\n', (13856, 13866), False, 'from datetime import datetime, timedelta\n'), ((18883, 18912), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['results'], {}), '(results)\n', (18903, 18912), False, 'from collections import OrderedDict\n'), ((27730, 27773), 'pandas.read_csv', 'pd.read_csv', (['nvd_filename'], {'low_memory': '(False)'}), '(nvd_filename, low_memory=False)\n', (27741, 27773), True, 'import pandas as pd\n'), ((27788, 27831), 'pandas.read_csv', 'pd.read_csv', (['cwe_filename'], {'low_memory': '(False)'}), '(cwe_filename, low_memory=False)\n', (27799, 27831), True, 'import pandas as pd\n'), ((5044, 5091), 'os.path.exists', 'os.path.exists', (['self.PATH_TO_DATASETS_DIRECTORY'], {}), '(self.PATH_TO_DATASETS_DIRECTORY)\n', (5058, 5091), False, 'import os\n'), ((5179, 5223), 'os.makedirs', 'os.makedirs', (['self.PATH_TO_DATASETS_DIRECTORY'], {}), '(self.PATH_TO_DATASETS_DIRECTORY)\n', (5190, 5223), False, 'import os\n'), ((5239, 5285), 'os.path.exists', 'os.path.exists', (['self.PATH_TO_RESULTS_DIRECTORY'], {}), '(self.PATH_TO_RESULTS_DIRECTORY)\n', (5253, 5285), False, 'import os\n'), ((5372, 5415), 'os.makedirs', 'os.makedirs', (['self.PATH_TO_RESULTS_DIRECTORY'], {}), '(self.PATH_TO_RESULTS_DIRECTORY)\n', (5383, 5415), False, 'import os\n'), 
((6180, 6215), 'os.path.exists', 'os.path.exists', (['self.MITRE_CWE_FILE'], {}), '(self.MITRE_CWE_FILE)\n', (6194, 6215), False, 'import os\n'), ((6373, 6390), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (6385, 6390), False, 'import requests, zipfile, io\n'), ((8669, 8686), 'pandas.io.json.json_normalize', 'json_normalize', (['x'], {}), '(x)\n', (8683, 8686), False, 'from pandas.io.json import json_normalize\n'), ((10435, 10470), 'os.path.exists', 'os.path.exists', (['config.NVD_CVE_FILE'], {}), '(config.NVD_CVE_FILE)\n', (10449, 10470), False, 'import os\n'), ((11840, 11877), 'os.path.exists', 'os.path.exists', (['config.MITRE_CWE_FILE'], {}), '(config.MITRE_CWE_FILE)\n', (11854, 11877), False, 'import os\n'), ((12121, 12156), 'os.path.exists', 'os.path.exists', (['config.NVD_CWE_FILE'], {}), '(config.NVD_CWE_FILE)\n', (12135, 12156), False, 'import os\n'), ((12736, 12793), 'numpy.argwhere', 'np.argwhere', (["(df_CWE_NVD['Name'].values == 'NVD-CWE-Other')"], {}), "(df_CWE_NVD['Name'].values == 'NVD-CWE-Other')\n", (12747, 12793), True, 'import numpy as np\n'), ((12812, 12870), 'numpy.argwhere', 'np.argwhere', (["(df_CWE_NVD['Name'].values == 'NVD-CWE-noinfo')"], {}), "(df_CWE_NVD['Name'].values == 'NVD-CWE-noinfo')\n", (12823, 12870), True, 'import numpy as np\n'), ((14667, 14687), 'pandas.to_datetime', 'pd.to_datetime', (['date'], {}), '(date)\n', (14681, 14687), True, 'import pandas as pd\n'), ((14842, 14857), 'torch.sum', 'torch.sum', (['y[i]'], {}), '(y[i])\n', (14851, 14857), False, 'import torch\n'), ((16308, 16333), 'numpy.random.shuffle', 'np.random.shuffle', (['values'], {}), '(values)\n', (16325, 16333), True, 'import numpy as np\n'), ((16817, 16840), 're.findall', 're.findall', (['"""\\\\d+"""', 'cwe'], {}), "('\\\\d+', cwe)\n", (16827, 16840), False, 'import re\n'), ((26541, 26570), 'os.path.exists', 'os.path.exists', (['save_filename'], {}), '(save_filename)\n', (26555, 26570), False, 'import os\n'), ((26578, 26606), 'os.path.exists', 'os.path.exists', (['nvd_filename'], {}), '(nvd_filename)\n', (26592, 26606), False, 'import os\n'), ((26614, 26642), 'os.path.exists', 'os.path.exists', (['cwe_filename'], {}), '(cwe_filename)\n', (26628, 26642), False, 'import os\n'), ((5975, 5992), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (5987, 5992), False, 'import requests, zipfile, io\n'), ((6423, 6444), 'io.BytesIO', 'io.BytesIO', (['r.content'], {}), '(r.content)\n', (6433, 6444), False, 'import requests, zipfile, io\n'), ((8888, 8930), 'pandas.io.json.json_normalize', 'json_normalize', (['CWE_normalized_json_step_2'], {}), '(CWE_normalized_json_step_2)\n', (8902, 8930), False, 'from pandas.io.json import json_normalize\n'), ((20694, 20717), 're.findall', 're.findall', (['"""\\\\d+"""', 'cwe'], {}), "('\\\\d+', cwe)\n", (20704, 20717), False, 'import re\n'), ((22284, 22307), 're.findall', 're.findall', (['"""\\\\d+"""', 'cwe'], {}), "('\\\\d+', cwe)\n", (22294, 22307), False, 'import re\n'), ((23974, 23997), 're.findall', 're.findall', (['"""\\\\d+"""', 'cwe'], {}), "('\\\\d+', cwe)\n", (23984, 23997), False, 'import re\n'), ((6029, 6050), 'io.BytesIO', 'io.BytesIO', (['r.content'], {}), '(r.content)\n', (6039, 6050), False, 'import requests, zipfile, io\n'), ((7993, 8010), 'pandas.io.json.json_normalize', 'json_normalize', (['x'], {}), '(x)\n', (8007, 8010), False, 'from pandas.io.json import json_normalize\n'), ((7143, 7160), 'pandas.io.json.json_normalize', 'json_normalize', (['x'], {}), '(x)\n', (7157, 7160), False, 'from pandas.io.json import 
json_normalize\n')] |
from setuptools import setup, find_packages
import numpy as np
setup(name='tssb',
version='0.1',
url='https://github.com/ermshaua/time-series-segmentation-benchmark',
license='BSD 3-Clause License',
author='<NAME>',
description='This repository contains a time series segmentation benchmark.',
packages=find_packages(exclude=['tests', 'examples']),
package_data={'': ['LICENSE']},
include_package_data=True,
install_requires=np.loadtxt(fname='requirements.txt', delimiter='\n', dtype=np.str).tolist(),
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
zip_safe=False) | [
"numpy.loadtxt",
"setuptools.find_packages"
] | [((341, 385), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests', 'examples']"}), "(exclude=['tests', 'examples'])\n", (354, 385), False, 'from setuptools import setup, find_packages\n'), ((481, 547), 'numpy.loadtxt', 'np.loadtxt', ([], {'fname': '"""requirements.txt"""', 'delimiter': '"""\n"""', 'dtype': 'np.str'}), "(fname='requirements.txt', delimiter='\\n', dtype=np.str)\n", (491, 547), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import os.path as op
import sys
import logging
import numpy as np
import pandas as pd
from subprocess import Popen, PIPE, run
from jinja2 import Template
from jcvi.apps.base import sh, mkdir
from jcvi.formats.base import must_open
def read_samplelist(fs):
assert op.isfile(fs), "samplelist not found: %s" % fs
cvts = dict(SampleID=str,Tissue=str,Genotype=str)
sl = pd.read_csv(fs, sep="\t", header=0, converters=cvts,
true_values=['1','Y','Yes','T','True'],
false_values=['0','N','No','F','False'])
return sl
def check_fastq_0(args, yid):
fi = "%s/%s.tsv" % (args.metadir, yid)
sl = read_samplelist(fi)
cdic = dict(single=0, pair=0)
for i in range(len(sl)):
sid, paired = sl['SampleID'][i], sl['paired'][i]
if paired:
f1 = "%s/%s/%s_1.fq.gz" % (args.seqdir, yid, sid)
f1b = "%s/%s/%s_R1.fq.gz" % (args.seqdir, yid, sid)
f2 = "%s/%s/%s_2.fq.gz" % (args.seqdir, yid, sid)
f2b = "%s/%s/%s_R2.fq.gz" % (args.seqdir, yid, sid)
assert op.isfile(f1) or op.isfile(f1b), "fastq not found: %s" % f1
assert op.isfile(f2) or op.isfile(f2b), "fastq not found: %s" % f2
cdic["pair"] += 1
else:
f1 = "%s/%s/%s.fq.gz" % (args.seqdir, yid, sid)
f1b = "%s/%s/%s_R0.fq.gz" % (args.seqdir, yid, sid)
assert op.isfile(f1) or op.isfile(f1b), "fastq not found: %s" % f1
cdic["single"] += 1
fcdic = { k: v for k, v in cdic.items() if v > 0}
single_end = False
if len(fcdic) > 1:
print("mixed library: %d single + %d paired" % (fcdir['single'], fcdir['pair']))
else:
if list(fcdic.keys())[0] == 'single': single_end = True
return single_end
def check_fastq(design, paired, source):
sl = read_samplelist(design)
cdic = dict(SE=0, PE=0)
if source == 'mixed': assert 'source' in sl
if paired == 'mixed': assert 'paired' in sl
for i in range(len(sl)):
sid, r0, r1, r2 = sl['SampleID'][i], sl['r0'][i], sl['r1'][i], sl['r2'][i]
source1 = source
if 'source' in sl:
if source == 'mixed':
source1 = sl['source'][i]
else:
assert sl['source'][i] == source
paired1 = paired
if 'paired' in sl:
if paired == 'mixed':
paired1 = sl['paired'][i]
else:
assert sl['paired'][i] == paired
if paired1 == 'SE':
if source1 == 'local':
assert op.isfile(r0)
elif source1 == 'sra':
assert r0.startswith("SRR") or r0.startswith("DRR")
elif source1 == 's3':
assert r0.startswith("s3://")
else:
if source1 == 'local':
assert op.isfile(r1)
assert op.isfile(r2)
elif source1 == 'sra':
assert r0.startswith("SRR")
elif source1 == 's3':
assert r1.startswith("s3://")
assert r2.startswith("s3://")
def nf_start(args):
yid = args.yid
barn, genome = args.metadir, args.genome
design = "design.tsv"
ft = "%s/%s.config" % (args.cfgdir, args.lib)
fht = must_open(ft, 'r')
tmp = Template(fht.read())
aligner_map = dict(rnaseq='hisat2',chipseq='bwa',methylseq='bismark_hisat2')
aligner = aligner_map[args.lib] if args.aligner == 'auto' else args.aligner
msg = tmp.render(yid = yid,
design = design,
source = args.source,
read_type = args.read_type,
paired = args.paired,
interleaved = str(args.interleaved).lower(),
save_fastq = str(args.save_fastq).lower(),
trimmer = args.trimmer,
save_trimmed = str(args.save_trimmed).lower(),
aligner = aligner,
saveBAM = str(args.saveBAM).lower(),
genome = args.genome,
skip_preseq = str(not args.preseq).lower(),
skip_markdup = str(not args.markdup).lower(),
stranded = args.stranded,
count_multi = str(args.multi).lower(),
ase = str(args.ase).lower(),
ril = str(args.ril).lower(),
cage = str(args.cage).lower(),
salmon = str(args.salmon).lower(),
stringtie = str(args.stringtie).lower(),
narrow_peak = str(args.narrow_peak).lower()
)
rundir = op.join(args.projdir, args.lib, 'nf', yid)
workdir = op.join(args.workdir, args.lib, yid)
rawdir = op.join(args.rawdir, args.lib, yid)
mkdir(rundir, overwrite=True)
mkdir(workdir, overwrite=True)
mkdir(rawdir, overwrite=True)
os.chdir(rundir)
# metadir = op.join(barn, genome, '05_excel')
# xls = "%s/%s.xlsx" % (metadir, yid)
# assert op.isfile(xls)
sh(f"nf.barn.py {yid} -o design.tsv")
#check_fastq(design, args.paired, args.source)
fc = "nextflow.config"
fhc = must_open(fc, 'w')
fhc.write(msg)
fhc.close()
sh("ln -sf %s/ work" % workdir)
sh("ln -sf %s/ raw" % rawdir)
def cpnf(args):
tags = args.tags.split(",")
print("will copy %d project results: %s" % (len(tags), ' '.join(tags)))
for tag in tags:
ds = op.join(args.srcdir, tag)
dd = op.join(args.destdir, tag)
if op.isdir(ds):
print("%s : copying" % tag)
sh("find %s -not \\( -name \"*.bam\" -o -name \"*.bigwig\" \\) | cpio -pdm %s" % (ds, dd))
else:
print("%s : not exist - skipped")
if __name__ == "__main__":
import argparse
ps = argparse.ArgumentParser(
formatter_class = argparse.ArgumentDefaultsHelpFormatter,
description = "initiate a nextflow project (creating design and config files)"
)
libs = ['rnaseq','smrnaseq','chipseq','dapseq','atacseq','methylseq','dnaseq']
allowed_sources = ['sra','local','s3','mixed']
ps.add_argument('lib', choices = libs, help = 'library type')
ps.add_argument('yid', help = 'study/project id')
ps.add_argument('--projdir', default=os.environ['proj'], help = 'project dir')
ps.add_argument('--cfgdir', default="%s/configs/templates" % os.environ['nf'], help = 'nextflow template config dir')
ps.add_argument('--workdir', default=os.environ['NXF_WORK'], help = 'nextflow work dir')
ps.add_argument('--rawdir', default="%s/raw" % os.environ['NXF_CACHE'], help = 'nextflow raw output dir')
ps.add_argument('--source', default='local', choices=allowed_sources, help='sequence source')
ps.add_argument('--read_type', default='illumina', choices=['illumina','nanopore'], help='read type')
ps.add_argument('--paired', default='SE', choices=['SE','PE','mixed'], help='single end, paired end or mixed')
ps.add_argument('--interleaved', action='store_true', help='interleaved format?')
ps.add_argument('--metadir', default=os.environ['ba'], help = 'meta table directory')
ps.add_argument('--genome', default='Zmays_B73v5', help = 'reference genome')
ps.add_argument('--keep', action='store_true', help='keep previous results?')
ps.add_argument('--save_fastq', action='store_true', help='save fastq files?')
ps.add_argument('--trimmer', default='trim_galore', choices=['no','trim_galore'], help='trimming software')
ps.add_argument('--save_trimmed', action='store_true', help='save trimmed fastq files?')
ps.add_argument('--aligner', default='auto', help='aligning software')
ps.add_argument('--saveBAM', action='store_true', help='save bam files?')
ps.add_argument('--preseq', action='store_true', help='run preseq?')
ps.add_argument('--markdup', action='store_true', help='mark PCR duplicates?')
g1 = ps.add_argument_group('rnaseq', 'RNA-Seq specific arguments')
g1.add_argument('--stranded', default='no', choices=['no','forward','reverse'], help = 'read strandedness')
g1.add_argument('--multi', action='store_true', help='count multi-mapping reads?')
g1.add_argument('--ase', action='store_true', help='allele specific expression?')
g1.add_argument('--ril', action='store_true', help='genotype (ril) samples?')
g1.add_argument('--cage', action='store_true', help='run CAGE pipeline?')
g1.add_argument('--salmon', action='store_true', help='run salmon?')
g1.add_argument('--stringtie', action='store_true', help='run stringtie?')
g2 = ps.add_argument_group('chipseq', 'chip-seq specific arguments')
g2.add_argument('--narrow_peak', action='store_true', help = 'turn off broad peak calling mode in MACS2')
args = ps.parse_args()
nf_start(args)
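# Example invocation (a sketch; the script name and the 'zm001' study id are
# hypothetical placeholders, the option values match the choices defined above):
#   ./nf.start.py rnaseq zm001 --source local --paired PE --stranded reverse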
| [
"pandas.read_csv",
"argparse.ArgumentParser",
"jcvi.formats.base.must_open",
"os.path.join",
"os.path.isfile",
"os.chdir",
"jcvi.apps.base.sh",
"jcvi.apps.base.mkdir",
"os.path.isdir"
] | [((326, 339), 'os.path.isfile', 'op.isfile', (['fs'], {}), '(fs)\n', (335, 339), True, 'import os.path as op\n'), ((436, 581), 'pandas.read_csv', 'pd.read_csv', (['fs'], {'sep': '"""\t"""', 'header': '(0)', 'converters': 'cvts', 'true_values': "['1', 'Y', 'Yes', 'T', 'True']", 'false_values': "['0', 'N', 'No', 'F', 'False']"}), "(fs, sep='\\t', header=0, converters=cvts, true_values=['1', 'Y',\n 'Yes', 'T', 'True'], false_values=['0', 'N', 'No', 'F', 'False'])\n", (447, 581), True, 'import pandas as pd\n'), ((3326, 3344), 'jcvi.formats.base.must_open', 'must_open', (['ft', '"""r"""'], {}), "(ft, 'r')\n", (3335, 3344), False, 'from jcvi.formats.base import must_open\n'), ((4723, 4765), 'os.path.join', 'op.join', (['args.projdir', 'args.lib', '"""nf"""', 'yid'], {}), "(args.projdir, args.lib, 'nf', yid)\n", (4730, 4765), True, 'import os.path as op\n'), ((4780, 4816), 'os.path.join', 'op.join', (['args.workdir', 'args.lib', 'yid'], {}), '(args.workdir, args.lib, yid)\n', (4787, 4816), True, 'import os.path as op\n'), ((4830, 4865), 'os.path.join', 'op.join', (['args.rawdir', 'args.lib', 'yid'], {}), '(args.rawdir, args.lib, yid)\n', (4837, 4865), True, 'import os.path as op\n'), ((4870, 4899), 'jcvi.apps.base.mkdir', 'mkdir', (['rundir'], {'overwrite': '(True)'}), '(rundir, overwrite=True)\n', (4875, 4899), False, 'from jcvi.apps.base import sh, mkdir\n'), ((4904, 4934), 'jcvi.apps.base.mkdir', 'mkdir', (['workdir'], {'overwrite': '(True)'}), '(workdir, overwrite=True)\n', (4909, 4934), False, 'from jcvi.apps.base import sh, mkdir\n'), ((4939, 4968), 'jcvi.apps.base.mkdir', 'mkdir', (['rawdir'], {'overwrite': '(True)'}), '(rawdir, overwrite=True)\n', (4944, 4968), False, 'from jcvi.apps.base import sh, mkdir\n'), ((4973, 4989), 'os.chdir', 'os.chdir', (['rundir'], {}), '(rundir)\n', (4981, 4989), False, 'import os\n'), ((5115, 5152), 'jcvi.apps.base.sh', 'sh', (['f"""nf.barn.py {yid} -o design.tsv"""'], {}), "(f'nf.barn.py {yid} -o design.tsv')\n", (5117, 5152), False, 'from jcvi.apps.base import sh, mkdir\n'), ((5242, 5260), 'jcvi.formats.base.must_open', 'must_open', (['fc', '"""w"""'], {}), "(fc, 'w')\n", (5251, 5260), False, 'from jcvi.formats.base import must_open\n'), ((5301, 5332), 'jcvi.apps.base.sh', 'sh', (["('ln -sf %s/ work' % workdir)"], {}), "('ln -sf %s/ work' % workdir)\n", (5303, 5332), False, 'from jcvi.apps.base import sh, mkdir\n'), ((5337, 5366), 'jcvi.apps.base.sh', 'sh', (["('ln -sf %s/ raw' % rawdir)"], {}), "('ln -sf %s/ raw' % rawdir)\n", (5339, 5366), False, 'from jcvi.apps.base import sh, mkdir\n'), ((5877, 6044), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'description': '"""initiate a nextflow project (creating design and config files)"""'}), "(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, description=\n 'initiate a nextflow project (creating design and config files)')\n", (5900, 6044), False, 'import argparse\n'), ((5526, 5551), 'os.path.join', 'op.join', (['args.srcdir', 'tag'], {}), '(args.srcdir, tag)\n', (5533, 5551), True, 'import os.path as op\n'), ((5565, 5591), 'os.path.join', 'op.join', (['args.destdir', 'tag'], {}), '(args.destdir, tag)\n', (5572, 5591), True, 'import os.path as op\n'), ((5603, 5615), 'os.path.isdir', 'op.isdir', (['ds'], {}), '(ds)\n', (5611, 5615), True, 'import os.path as op\n'), ((5669, 5759), 'jcvi.apps.base.sh', 'sh', (['(\'find %s -not \\\\( -name "*.bam" -o -name "*.bigwig" \\\\) | cpio -pdm %s\' %\n (ds, dd))'], {}), '(\'find %s -not 
\\\\( -name "*.bam" -o -name "*.bigwig" \\\\) | cpio -pdm %s\' %\n (ds, dd))\n', (5671, 5759), False, 'from jcvi.apps.base import sh, mkdir\n'), ((1139, 1152), 'os.path.isfile', 'op.isfile', (['f1'], {}), '(f1)\n', (1148, 1152), True, 'import os.path as op\n'), ((1156, 1170), 'os.path.isfile', 'op.isfile', (['f1b'], {}), '(f1b)\n', (1165, 1170), True, 'import os.path as op\n'), ((1218, 1231), 'os.path.isfile', 'op.isfile', (['f2'], {}), '(f2)\n', (1227, 1231), True, 'import os.path as op\n'), ((1235, 1249), 'os.path.isfile', 'op.isfile', (['f2b'], {}), '(f2b)\n', (1244, 1249), True, 'import os.path as op\n'), ((1465, 1478), 'os.path.isfile', 'op.isfile', (['f1'], {}), '(f1)\n', (1474, 1478), True, 'import os.path as op\n'), ((1482, 1496), 'os.path.isfile', 'op.isfile', (['f1b'], {}), '(f1b)\n', (1491, 1496), True, 'import os.path as op\n'), ((2629, 2642), 'os.path.isfile', 'op.isfile', (['r0'], {}), '(r0)\n', (2638, 2642), True, 'import os.path as op\n'), ((2898, 2911), 'os.path.isfile', 'op.isfile', (['r1'], {}), '(r1)\n', (2907, 2911), True, 'import os.path as op\n'), ((2935, 2948), 'os.path.isfile', 'op.isfile', (['r2'], {}), '(r2)\n', (2944, 2948), True, 'import os.path as op\n')] |
'''
testimages.py
Just a test program to test the libraries
Author: <NAME>
'''
import tkinter, tkinter.filedialog
from GIF import GIF
root = tkinter.Tk()
root.withdraw()
file_path = tkinter.filedialog.askopenfilename()
f = open(file_path,"rb")
myImage = GIF()
myImage.loadGIF(f)
#for p in myImage.getImage(0).pixels:
# print(p)
images = myImage.getImages()
_i = 0
#see the image in cmd line
for img in images:
print("Image #"+str(_i))
img.showImage()
_i+=1
myImage.save("C:\\test.gif") | [
"GIF.GIF",
"tkinter.Tk",
"tkinter.filedialog.askopenfilename"
] | [((142, 154), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (152, 154), False, 'import tkinter, tkinter.filedialog\n'), ((184, 220), 'tkinter.filedialog.askopenfilename', 'tkinter.filedialog.askopenfilename', ([], {}), '()\n', (218, 220), False, 'import tkinter, tkinter.filedialog\n'), ((258, 263), 'GIF.GIF', 'GIF', ([], {}), '()\n', (261, 263), False, 'from GIF import GIF\n')] |
import tweepy
import secrets_keys as sk
def api_authentication():
'''Connection to the twitter API
Args:
none
Returns:
api: api connection
'''
# Twitter API connection/authentication
auth = tweepy.OAuthHandler(sk.consumer_token, sk.consumer_secret)
try:
redirect_url = auth.get_authorization_url()
except tweepy.TweepError:
print("Error! Failed to get request token.")
auth.set_access_token(sk.key, sk.secret)
api = tweepy.API(auth)
try:
api.verify_credentials()
except:
print("Twitter Authentication Error")
return api
def get_tweets_url_tx(api, twitter_username: str, num_tweets: int):
'''Get the tx URLs from the last tweets
Args:
api : twitter api object
twitter_username (str): Twitter username to get the urls
        num_tweets (int): Number of tweets to get the urls
Returns:
tweets_urls (list): a list with the tx urls
'''
tweets = api.user_timeline(
screen_name=twitter_username, count=num_tweets)
tweets_urls = []
for tweet in tweets:
expanded_url = tweet._json["entities"]["urls"][1][
"expanded_url"]
tweets_urls.append(expanded_url)
return tweets_urls
def post_tweet_wiht_media(api, image_path: str, tweet_text: str):
    '''Post a tweet with media attached to it (image)
Args:
api : twitter api object
image_path (str): Path to the image to attach
        tweet_text (str): Text to attach to the tweet
Returns:
res_status (json): status of the tweet posted
'''
# Post the message in your Twitter Bot account
# with the image of the sold NFT attached
media = api.media_upload(image_path)
res_status = api.update_status(
status=tweet_text, media_ids=[media.media_id])
return res_status
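# Example usage (a sketch; the username, image path and tweet text below are
# hypothetical placeholders):
# api = api_authentication()
# urls = get_tweets_url_tx(api, twitter_username="some_nft_project", num_tweets=5)
# post_tweet_wiht_media(api, "sold_nft.png", "New sale! " + urls[0])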
| [
"tweepy.API",
"tweepy.OAuthHandler"
] | [((234, 292), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['sk.consumer_token', 'sk.consumer_secret'], {}), '(sk.consumer_token, sk.consumer_secret)\n', (253, 292), False, 'import tweepy\n'), ((494, 510), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (504, 510), False, 'import tweepy\n')] |
# -------------------------------------------------#
# EXERCISE 17 #
# -------------------------------------------------#
# create a program that reads the length of the opposite leg
# and of the adjacent leg of a right triangle, then calculates
# and displays the length of the hypotenuse
# code using a simple Pythagorean theorem calculation
'''
co = float(input('Cateto oposto: '))
ca = float(input('Cateto adjacente: '))
hi = (co ** 2 + ca ** 2) **(1/2)
print('Hipotenusa = {:.2f}'.format(hi))
'''
# calculation using the math module import, hypot function (hypotenuse calculation)
from math import hypot
co = float(input('Cateto oposto: '))
ca = float(input('Cateto adjacente: '))
#hi = hypot(co, ca)
print('Hipotenusa: {:.2f}'.format(hypot(co,ca)))
| [
"math.hypot"
] | [((768, 781), 'math.hypot', 'hypot', (['co', 'ca'], {}), '(co, ca)\n', (773, 781), False, 'from math import hypot\n')] |
import unittest
import numpy as np
import tensorflow as tf
import tensorflow_probability
import mvg_distributions.covariance_representations as cov_rep
from mvg_distributions.cholesky_wishart import CholeskyWishart
from mvg_distributions.test.test_losses_base import LossesTestBase
from mvg_distributions.sqrt_gamma_gaussian import SqrtGammaGaussian, SparseSqrtGammaGaussian
tfd = tensorflow_probability.distributions
tfb = tensorflow_probability.bijectors
class TestCholeskyWishart(LossesTestBase):
def setUp(self):
super().setUp()
self.x, self.x_cov_obj, self.sqrt_w_tfd, self.sqrt_w = self._create_single_sqrt_wishart_pair()
def _create_single_sqrt_wishart_pair(self, add_sparsity_correction=False):
# Create a random scale matrix for the Wishart distribution
diag_precision_prior = np.abs(np.random.normal(size=(self.batch_size, self.features_size)))
diag_precision_prior = diag_precision_prior.astype(self.dtype.as_numpy_dtype)
precision_prior = np.zeros(shape=(self.batch_size, self.features_size, self.features_size),
dtype=self.dtype.as_numpy_dtype)
for i in range(self.batch_size):
precision_prior[i][np.diag_indices_from(precision_prior[i])] = diag_precision_prior[i]
log_diag_precision_prior = np.log(diag_precision_prior)
# Create a random vector of degrees of freedom, whose values must be larger than features_size
df = np.random.uniform(low=self.features_size, high=self.features_size * 10, size=self.batch_size)
df = df.astype(self.dtype.as_numpy_dtype)
# Create a square root Wishart distribution using bijectors
wishart = tfd.Wishart(scale=precision_prior, df=df)
cholesky_bijector = tfb.Invert(tfb.CholeskyOuterProduct())
sqrt_wishart_tfd = tfd.TransformedDistribution(distribution=wishart, bijector=cholesky_bijector)
# Create our custom square root Wishart distribution with the same parameters
sqrt_wishart = CholeskyWishart(df=df, log_diag_scale=log_diag_precision_prior,
add_sparsity_correction=add_sparsity_correction)
# Create a random Cholesky matrix to test the probability density functions
_, __, x_covariance, x_weights, x_basis, log_diag = self._random_normal_params(cov_rep.PrecisionConvCholFilters)
x = np.linalg.cholesky(np.linalg.inv(x_covariance))
# Our custom square root Wishart is optimized to work with PrecisionConvCholFilters, it will measure
# the pdf of the Cholesky of the Precision
img_w = int(np.sqrt(self.features_size))
sample_shape = tf.TensorShape((self.batch_size, img_w, img_w, 1))
x_cov_obj = cov_rep.PrecisionConvCholFilters(weights_precision=tf.constant(x_weights),
filters_precision=tf.constant(x_basis),
sample_shape=sample_shape)
x_cov_obj.log_diag_chol_precision = log_diag
return x, x_cov_obj, sqrt_wishart_tfd, sqrt_wishart
def test_log_prob(self):
log_prob1 = self.sqrt_w_tfd.log_prob(self.x)
log_prob2 = self.sqrt_w.log_prob(self.x)
log_prob3 = self.sqrt_w.log_prob(self.x_cov_obj)
log_prob4 = self.sqrt_w.log_prob(self.x_cov_obj.chol_precision)
self._asset_allclose_tf_feed(log_prob1, log_prob2)
self._asset_allclose_tf_feed(log_prob1, log_prob3)
self._asset_allclose_tf_feed(log_prob1, log_prob4)
def test_sample(self):
x1 = self.sqrt_w_tfd.sample(seed=0)
x2 = self.sqrt_w.sample(seed=0)
self._asset_allclose_tf_feed(x1, x2)
def test_sample_sparse(self):
x1_dense = self.sqrt_w_tfd.sample(seed=0)
x2_sparse = self.sqrt_w.sample_sparse(kw=3, seed=0)
x2_sparse = x2_sparse.chol_precision
# Ignore the values in x1_dense that are zero in x2_sparse
valid_mask = tf.equal(x2_sparse, tf.zeros(tf.shape(x2_sparse)))
valid_mask = tf.logical_not(valid_mask)
x1_sparse = x1_dense * tf.cast(valid_mask, x1_dense.dtype)
# FIXME
# This test fails because internally the random normal distributions have
# different sizes so the draw for the off-diagonal elements do not match
# self._asset_allclose_tf_feed(x1_sparse, x2_sparse)
# Checking that at least the diagonal part of the matrices do match
self._asset_allclose_tf_feed(tf.matrix_diag_part(x1_sparse), tf.matrix_diag_part(x2_sparse))
def test_gamma_gaussian_equivalent(self):
# Check that the Cholesky-Wishart distribution is equivalent to a SquareRootGamma-Gaussian distribution
sqrt_gamma_gaussian = SqrtGammaGaussian(df=self.sqrt_w.df, log_diag_scale=self.sqrt_w.log_diag_scale)
x_with_log_diag = tf.matrix_set_diag(self.x, self.x_cov_obj.log_diag_chol_precision)
log_prob_gg1 = sqrt_gamma_gaussian.log_prob(x_with_log_diag)
x_with_log_diag = tf.matrix_set_diag(self.x_cov_obj.chol_precision, self.x_cov_obj.log_diag_chol_precision)
log_prob_gg2 = sqrt_gamma_gaussian.log_prob(x_with_log_diag)
log_prob_wishart = self.sqrt_w.log_prob(self.x_cov_obj)
self._asset_allclose_tf_feed(log_prob_gg1, log_prob_wishart)
self._asset_allclose_tf_feed(log_prob_gg2, log_prob_wishart)
class TestCholeskyWishartConv(TestCholeskyWishart):
def _create_single_sqrt_wishart_pair(self, add_sparsity_correction=True):
return super()._create_single_sqrt_wishart_pair(add_sparsity_correction=add_sparsity_correction)
def test_log_prob(self):
# The log prob contains the sparsity correction factor, thus it won't match the one from
# the tensorflow Wishart distribution
pass
def test_gamma_gaussian_equivalent(self):
# Check that the Cholesky-Wishart distribution with the sparsity correction factor is equivalent to a
# SquareRootGamma-Gaussian distribution after removing the log probability of the zero terms in the off diagonal
sqrt_gamma_gaussian = SqrtGammaGaussian(df=self.sqrt_w.df, log_diag_scale=self.sqrt_w.log_diag_scale)
x_with_log_diag = tf.matrix_set_diag(self.x, self.x_cov_obj.log_diag_chol_precision)
log_prob1_gamma = sqrt_gamma_gaussian._log_prob_sqrt_gamma(x_with_log_diag)
log_prob1_normal = sqrt_gamma_gaussian.normal_dist.log_prob(self.x)
off_diag_mask = self.x_cov_obj.np_off_diag_mask()
log_prob1_normal = tf.reduce_sum(log_prob1_normal * off_diag_mask, axis=[1, 2])
log_prob_gg = log_prob1_gamma + log_prob1_normal
log_prob_wishart = self.sqrt_w.log_prob(self.x_cov_obj)
self._asset_allclose_tf_feed(log_prob_gg, log_prob_wishart)
def test_gamma_gaussian_equivalent_sparse(self):
# Check that the sparse Cholesky-Wishart distribution is equivalent to a
# SparseSquareRootGamma-Gaussian distribution
sqrt_gamma_gaussian = SparseSqrtGammaGaussian(df=self.sqrt_w.df, log_diag_scale=self.sqrt_w.log_diag_scale)
log_prob_gg = sqrt_gamma_gaussian.log_prob(self.x_cov_obj)
log_prob_wishart = self.sqrt_w.log_prob(self.x_cov_obj)
self._asset_allclose_tf_feed(log_prob_gg, log_prob_wishart)
if __name__ == '__main__':
unittest.main()
| [
"numpy.sqrt",
"tensorflow.matrix_diag_part",
"tensorflow.shape",
"tensorflow.logical_not",
"tensorflow.reduce_sum",
"numpy.log",
"unittest.main",
"tensorflow.cast",
"numpy.diag_indices_from",
"mvg_distributions.sqrt_gamma_gaussian.SparseSqrtGammaGaussian",
"numpy.random.normal",
"tensorflow.matrix_set_diag",
"mvg_distributions.sqrt_gamma_gaussian.SqrtGammaGaussian",
"mvg_distributions.cholesky_wishart.CholeskyWishart",
"numpy.zeros",
"numpy.linalg.inv",
"tensorflow.constant",
"numpy.random.uniform",
"tensorflow.TensorShape"
] | [((7338, 7353), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7351, 7353), False, 'import unittest\n'), ((1011, 1121), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.batch_size, self.features_size, self.features_size)', 'dtype': 'self.dtype.as_numpy_dtype'}), '(shape=(self.batch_size, self.features_size, self.features_size),\n dtype=self.dtype.as_numpy_dtype)\n', (1019, 1121), True, 'import numpy as np\n'), ((1328, 1356), 'numpy.log', 'np.log', (['diag_precision_prior'], {}), '(diag_precision_prior)\n', (1334, 1356), True, 'import numpy as np\n'), ((1474, 1571), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'self.features_size', 'high': '(self.features_size * 10)', 'size': 'self.batch_size'}), '(low=self.features_size, high=self.features_size * 10,\n size=self.batch_size)\n', (1491, 1571), True, 'import numpy as np\n'), ((2029, 2145), 'mvg_distributions.cholesky_wishart.CholeskyWishart', 'CholeskyWishart', ([], {'df': 'df', 'log_diag_scale': 'log_diag_precision_prior', 'add_sparsity_correction': 'add_sparsity_correction'}), '(df=df, log_diag_scale=log_diag_precision_prior,\n add_sparsity_correction=add_sparsity_correction)\n', (2044, 2145), False, 'from mvg_distributions.cholesky_wishart import CholeskyWishart\n'), ((2680, 2730), 'tensorflow.TensorShape', 'tf.TensorShape', (['(self.batch_size, img_w, img_w, 1)'], {}), '((self.batch_size, img_w, img_w, 1))\n', (2694, 2730), True, 'import tensorflow as tf\n'), ((4061, 4087), 'tensorflow.logical_not', 'tf.logical_not', (['valid_mask'], {}), '(valid_mask)\n', (4075, 4087), True, 'import tensorflow as tf\n'), ((4763, 4842), 'mvg_distributions.sqrt_gamma_gaussian.SqrtGammaGaussian', 'SqrtGammaGaussian', ([], {'df': 'self.sqrt_w.df', 'log_diag_scale': 'self.sqrt_w.log_diag_scale'}), '(df=self.sqrt_w.df, log_diag_scale=self.sqrt_w.log_diag_scale)\n', (4780, 4842), False, 'from mvg_distributions.sqrt_gamma_gaussian import SqrtGammaGaussian, SparseSqrtGammaGaussian\n'), ((4870, 4936), 'tensorflow.matrix_set_diag', 'tf.matrix_set_diag', (['self.x', 'self.x_cov_obj.log_diag_chol_precision'], {}), '(self.x, self.x_cov_obj.log_diag_chol_precision)\n', (4888, 4936), True, 'import tensorflow as tf\n'), ((5033, 5127), 'tensorflow.matrix_set_diag', 'tf.matrix_set_diag', (['self.x_cov_obj.chol_precision', 'self.x_cov_obj.log_diag_chol_precision'], {}), '(self.x_cov_obj.chol_precision, self.x_cov_obj.\n log_diag_chol_precision)\n', (5051, 5127), True, 'import tensorflow as tf\n'), ((6127, 6206), 'mvg_distributions.sqrt_gamma_gaussian.SqrtGammaGaussian', 'SqrtGammaGaussian', ([], {'df': 'self.sqrt_w.df', 'log_diag_scale': 'self.sqrt_w.log_diag_scale'}), '(df=self.sqrt_w.df, log_diag_scale=self.sqrt_w.log_diag_scale)\n', (6144, 6206), False, 'from mvg_distributions.sqrt_gamma_gaussian import SqrtGammaGaussian, SparseSqrtGammaGaussian\n'), ((6233, 6299), 'tensorflow.matrix_set_diag', 'tf.matrix_set_diag', (['self.x', 'self.x_cov_obj.log_diag_chol_precision'], {}), '(self.x, self.x_cov_obj.log_diag_chol_precision)\n', (6251, 6299), True, 'import tensorflow as tf\n'), ((6546, 6606), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(log_prob1_normal * off_diag_mask)'], {'axis': '[1, 2]'}), '(log_prob1_normal * off_diag_mask, axis=[1, 2])\n', (6559, 6606), True, 'import tensorflow as tf\n'), ((7018, 7108), 'mvg_distributions.sqrt_gamma_gaussian.SparseSqrtGammaGaussian', 'SparseSqrtGammaGaussian', ([], {'df': 'self.sqrt_w.df', 'log_diag_scale': 'self.sqrt_w.log_diag_scale'}), '(df=self.sqrt_w.df, log_diag_scale=self.sqrt_w.\n log_diag_scale)\n', 
(7041, 7108), False, 'from mvg_distributions.sqrt_gamma_gaussian import SqrtGammaGaussian, SparseSqrtGammaGaussian\n'), ((837, 897), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(self.batch_size, self.features_size)'}), '(size=(self.batch_size, self.features_size))\n', (853, 897), True, 'import numpy as np\n'), ((2418, 2445), 'numpy.linalg.inv', 'np.linalg.inv', (['x_covariance'], {}), '(x_covariance)\n', (2431, 2445), True, 'import numpy as np\n'), ((2628, 2655), 'numpy.sqrt', 'np.sqrt', (['self.features_size'], {}), '(self.features_size)\n', (2635, 2655), True, 'import numpy as np\n'), ((4119, 4154), 'tensorflow.cast', 'tf.cast', (['valid_mask', 'x1_dense.dtype'], {}), '(valid_mask, x1_dense.dtype)\n', (4126, 4154), True, 'import tensorflow as tf\n'), ((4510, 4540), 'tensorflow.matrix_diag_part', 'tf.matrix_diag_part', (['x1_sparse'], {}), '(x1_sparse)\n', (4529, 4540), True, 'import tensorflow as tf\n'), ((4542, 4572), 'tensorflow.matrix_diag_part', 'tf.matrix_diag_part', (['x2_sparse'], {}), '(x2_sparse)\n', (4561, 4572), True, 'import tensorflow as tf\n'), ((1225, 1265), 'numpy.diag_indices_from', 'np.diag_indices_from', (['precision_prior[i]'], {}), '(precision_prior[i])\n', (1245, 1265), True, 'import numpy as np\n'), ((2802, 2824), 'tensorflow.constant', 'tf.constant', (['x_weights'], {}), '(x_weights)\n', (2813, 2824), True, 'import tensorflow as tf\n'), ((2897, 2917), 'tensorflow.constant', 'tf.constant', (['x_basis'], {}), '(x_basis)\n', (2908, 2917), True, 'import tensorflow as tf\n'), ((4018, 4037), 'tensorflow.shape', 'tf.shape', (['x2_sparse'], {}), '(x2_sparse)\n', (4026, 4037), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2016-12-30 10:35
from __future__ import unicode_literals
import _1327.documents.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('documents', '0004_auto_20161213_2203'),
]
operations = [
migrations.AddField(
model_name='attachment',
name='hash_value',
field=models.CharField(default=_1327.documents.models.Attachment.get_hash, max_length=40, unique=True, verbose_name='Hash value'),
),
]
| [
"django.db.models.CharField"
] | [((439, 566), 'django.db.models.CharField', 'models.CharField', ([], {'default': '_1327.documents.models.Attachment.get_hash', 'max_length': '(40)', 'unique': '(True)', 'verbose_name': '"""Hash value"""'}), "(default=_1327.documents.models.Attachment.get_hash,\n max_length=40, unique=True, verbose_name='Hash value')\n", (455, 566), False, 'from django.db import migrations, models\n')] |
from django.conf.urls import patterns, include, url
from django.views.generic.simple import direct_to_template
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$',
direct_to_template, {'template': 'index.html'},
name='index',
),
url(r'^submissao/', include('submission.urls', namespace='submission')),
url(r'^emails/', include('emails.urls', namespace='emails')),
url(r'^resultado/$', 'core.views.results', name='resultado'),
url(r'^admin/', include(admin.site.urls)),
)
| [
"django.conf.urls.include",
"django.conf.urls.url",
"django.contrib.admin.autodiscover"
] | [((145, 165), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ([], {}), '()\n', (163, 165), False, 'from django.contrib import admin\n'), ((197, 268), 'django.conf.urls.url', 'url', (['"""^$"""', 'direct_to_template', "{'template': 'index.html'}"], {'name': '"""index"""'}), "('^$', direct_to_template, {'template': 'index.html'}, name='index')\n", (200, 268), False, 'from django.conf.urls import patterns, include, url\n'), ((438, 497), 'django.conf.urls.url', 'url', (['"""^resultado/$"""', '"""core.views.results"""'], {'name': '"""resultado"""'}), "('^resultado/$', 'core.views.results', name='resultado')\n", (441, 497), False, 'from django.conf.urls import patterns, include, url\n'), ((315, 365), 'django.conf.urls.include', 'include', (['"""submission.urls"""'], {'namespace': '"""submission"""'}), "('submission.urls', namespace='submission')\n", (322, 365), False, 'from django.conf.urls import patterns, include, url\n'), ((389, 431), 'django.conf.urls.include', 'include', (['"""emails.urls"""'], {'namespace': '"""emails"""'}), "('emails.urls', namespace='emails')\n", (396, 431), False, 'from django.conf.urls import patterns, include, url\n'), ((520, 544), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (527, 544), False, 'from django.conf.urls import patterns, include, url\n')] |
# ms_mint/io.py
import pandas as pd
import numpy as np
import io
import pymzml
from pathlib import Path as P
from datetime import date
from pyteomics import mzxml, mzml
def ms_file_to_df(fn):
fn = str(fn)
if fn.lower().endswith('.mzxml'):
df = mzxml_to_df(fn)
elif fn.lower().endswith('.mzml'):
df = mzml_to_df(fn)
elif fn.lower().endswith('hdf'):
df = pd.read_hdf(fn)
elif fn.lower().endswith('feather'):
df = pd.read_feather(fn)
# Compatibility with old schema
df = df.rename(columns={
'retentionTime': 'scan_time_min',
'intensity array': 'intensity',
'm/z array': 'mz'})
return df
def mzxml_to_df(fn):
'''
Reads mzXML file and returns a pandas.DataFrame.
'''
slices = []
with mzxml.MzXML( fn ) as ms_data:
while True:
try:
data = ms_data.next()
df = pd.DataFrame(data)
# Fix byteorder issue
df.loc[:,:] = df.values.byteswap().newbyteorder()
df = df[['num', 'msLevel', 'polarity', 'retentionTime', 'm/z array', 'intensity array']]
slices.append( df )
except StopIteration as e:
break
df = pd.concat(slices)
df['retentionTime'] = df['retentionTime'].astype(np.float32)
df['m/z array'] = df['m/z array'].astype(np.float32)
df['intensity array'] = df['intensity array'].astype(int)
df = df.rename(columns={'num': 'scan_id',
'msLevel': 'ms_level',
'retentionTime': 'scan_time_min',
'm/z array': 'mz',
'intensity array': 'intensity'})
df = df.reset_index(drop=True)
cols = ['scan_id', 'ms_level', 'polarity',
'scan_time_min', 'mz', 'intensity']
df = df[cols]
return df
def mzml_to_pandas_df_pyteomics(fn):
'''
Reads mzML file and returns a pandas.DataFrame.
'''
cols = ['retentionTime', 'm/z array', 'intensity array']
slices = []
with mzml.MzML(fn) as ms_data:
while True:
try:
data = ms_data.next()
data['retentionTime'] = data['scanList']['scan'][0]['scan time'] / 60
del data['scanList']
slices.append( pd.DataFrame(data) )
except:
break
df = pd.concat(slices)[cols]
df_to_numeric(df)
df['intensity array'] = df['intensity array'].astype(int)
df = df.reset_index(drop=True)
return df
def mzml_to_df(fn, assume_time_unit='seconds', remove_noise=False):
with pymzml.run.Reader(fn) as ms_data:
data = []
for spectrum in ms_data:
# Try to convert time units with build-in method
# some files have no time unit set. Then convert
# to minutes assuming the time unit is as set
# by assume_time_unit argument.
try:
RT = spectrum.scan_time_in_minutes()
except:
if assume_time_unit == 'seconds':
RT = spectrum.scan_time[0] / 60.
elif assume_time_unit == 'minutes':
RT = spectrum.scan_time[0]
if remove_noise: spectrum = spectrum.remove_noise()
peaks = spectrum.peaks("centroided")
data.append((spectrum.index, spectrum.ms_level, '+' if spectrum["positive scan"] else '-', RT, peaks))
ndx_explode = 4
df = pd.DataFrame(data).explode(ndx_explode)
df['mz'] = df[ndx_explode].apply(lambda x: x[0])
df['intensity'] = df[ndx_explode].apply(lambda x: x[1]).astype(int)
del df[ndx_explode]
df = df.rename(columns={0: 'scan_id', 1: 'ms_level', 2: 'polarity', 3: 'scan_time_min' })
df = df.reset_index(drop=True)
return df
def df_to_numeric(df):
'''
Converts dataframe to numeric types if possible.
'''
for col in df.columns:
df.loc[:, col] = pd.to_numeric(df[col], errors='ignore')
def export_to_excel(mint, fn=None):
date_string = str(date.today())
if fn is None:
file_buffer = io.BytesIO()
writer = pd.ExcelWriter(file_buffer)
else:
writer = pd.ExcelWriter(fn)
# Write into file
mint.peaklist.to_excel(writer, 'Peaklist', index=False)
mint.results.to_excel(writer, 'Results', index=False)
meta = pd.DataFrame({'MINT_version': [mint.version],
'Date': [date_string]}).T[0]
meta.to_excel(writer, 'Metadata', index=True, header=False)
# Close writer and maybe return file buffer
writer.close()
if fn is None:
        file_buffer.seek(0)
        return file_buffer
def convert_ms_file_to_feather(fn, fn_out=None):
fn = P(fn)
if fn_out is None:
fn_out = fn.with_suffix('.feather')
df = ms_file_to_df(fn).reset_index(drop=True)
df.to_feather(fn_out)
return fn_out | [
"pymzml.run.Reader",
"pyteomics.mzml.MzML",
"pandas.ExcelWriter",
"pyteomics.mzxml.MzXML",
"pandas.read_feather",
"pathlib.Path",
"io.BytesIO",
"pandas.to_numeric",
"pandas.DataFrame",
"datetime.date.today",
"pandas.concat",
"pandas.read_hdf"
] | [((1284, 1301), 'pandas.concat', 'pd.concat', (['slices'], {}), '(slices)\n', (1293, 1301), True, 'import pandas as pd\n'), ((4782, 4787), 'pathlib.Path', 'P', (['fn'], {}), '(fn)\n', (4783, 4787), True, 'from pathlib import Path as P\n'), ((811, 826), 'pyteomics.mzxml.MzXML', 'mzxml.MzXML', (['fn'], {}), '(fn)\n', (822, 826), False, 'from pyteomics import mzxml, mzml\n'), ((2115, 2128), 'pyteomics.mzml.MzML', 'mzml.MzML', (['fn'], {}), '(fn)\n', (2124, 2128), False, 'from pyteomics import mzxml, mzml\n'), ((2443, 2460), 'pandas.concat', 'pd.concat', (['slices'], {}), '(slices)\n', (2452, 2460), True, 'import pandas as pd\n'), ((2684, 2705), 'pymzml.run.Reader', 'pymzml.run.Reader', (['fn'], {}), '(fn)\n', (2701, 2705), False, 'import pymzml\n'), ((4026, 4065), 'pandas.to_numeric', 'pd.to_numeric', (['df[col]'], {'errors': '"""ignore"""'}), "(df[col], errors='ignore')\n", (4039, 4065), True, 'import pandas as pd\n'), ((4126, 4138), 'datetime.date.today', 'date.today', ([], {}), '()\n', (4136, 4138), False, 'from datetime import date\n'), ((4181, 4193), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (4191, 4193), False, 'import io\n'), ((4211, 4238), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['file_buffer'], {}), '(file_buffer)\n', (4225, 4238), True, 'import pandas as pd\n'), ((4266, 4284), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['fn'], {}), '(fn)\n', (4280, 4284), True, 'import pandas as pd\n'), ((3544, 3562), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (3556, 3562), True, 'import pandas as pd\n'), ((4436, 4505), 'pandas.DataFrame', 'pd.DataFrame', (["{'MINT_version': [mint.version], 'Date': [date_string]}"], {}), "({'MINT_version': [mint.version], 'Date': [date_string]})\n", (4448, 4505), True, 'import pandas as pd\n'), ((397, 412), 'pandas.read_hdf', 'pd.read_hdf', (['fn'], {}), '(fn)\n', (408, 412), True, 'import pandas as pd\n'), ((937, 955), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (949, 955), True, 'import pandas as pd\n'), ((467, 486), 'pandas.read_feather', 'pd.read_feather', (['fn'], {}), '(fn)\n', (482, 486), True, 'import pandas as pd\n'), ((2370, 2388), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (2382, 2388), True, 'import pandas as pd\n')] |
# (C) British Crown Copyright 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Tests for the Gnomonic coordinate system.
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
from numpy.testing import assert_almost_equal
import pytest
import cartopy.crs as ccrs
def check_proj4_params(crs, other_args):
expected = other_args | {'proj=gnom', 'no_defs'}
pro4_params = set(crs.proj4_init.lstrip('+').split(' +'))
assert expected == pro4_params
def test_default():
gnom = ccrs.Gnomonic()
other_args = {'ellps=WGS84', 'lon_0=0.0', 'lat_0=0.0'}
check_proj4_params(gnom, other_args)
assert_almost_equal(np.array(gnom.x_limits),
[-5e7, 5e7])
assert_almost_equal(np.array(gnom.y_limits),
[-5e7, 5e7])
@pytest.mark.parametrize('lat', [-10, 0, 10])
@pytest.mark.parametrize('lon', [-10, 0, 10])
def test_central_params(lat, lon):
gnom = ccrs.Gnomonic(central_latitude=lat, central_longitude=lon)
other_args = {'lat_0={}'.format(lat), 'lon_0={}'.format(lon),
'ellps=WGS84'}
check_proj4_params(gnom, other_args)
assert_almost_equal(np.array(gnom.x_limits),
[-5e7, 5e7])
assert_almost_equal(np.array(gnom.y_limits),
[-5e7, 5e7])
def test_grid():
# USGS Professional Paper 1395, pg 168, Table 26
globe = ccrs.Globe(ellipse=None,
semimajor_axis=1.0, semiminor_axis=1.0)
gnom = ccrs.Gnomonic(globe=globe)
geodetic = gnom.as_geodetic()
other_args = {'a=1.0', 'b=1.0', 'lon_0=0.0', 'lat_0=0.0'}
check_proj4_params(gnom, other_args)
assert_almost_equal(np.array(gnom.x_limits),
[-5e7, 5e7])
assert_almost_equal(np.array(gnom.y_limits),
[-5e7, 5e7])
lats, lons = np.mgrid[0:90:10, 0:90:10].reshape((2, -1))
expected_x = np.tile(
[0.0000, 0.1763, 0.3640, 0.5774, 0.8391, 1.1918, 1.7321, 2.7475,
5.6713],
9)
expected_y = np.array([
[5.6713, 2.7475, 1.7321, 1.1918, 0.8391, 0.5774, 0.3640, 0.1763, 0],
[5.7588, 2.7899, 1.7588, 1.2101, 0.8520, 0.5863, 0.3696, 0.1790, 0],
[6.0353, 2.9238, 1.8432, 1.2682, 0.8930, 0.6144, 0.3873, 0.1876, 0],
[6.5486, 3.1725, 2.0000, 1.3761, 0.9689, 0.6667, 0.4203, 0.2036, 0],
[7.4033, 3.5866, 2.2610, 1.5557, 1.0954, 0.7537, 0.4751, 0.2302, 0],
[8.8229, 4.2743, 2.6946, 1.8540, 1.3054, 0.8982, 0.5662, 0.2743, 0],
[11.3426, 5.4950, 3.4641, 2.3835, 1.6782, 1.1547, 0.7279, 0.3527, 0],
[16.5817, 8.0331, 5.0642, 3.4845, 2.4534, 1.6881, 1.0642, 0.5155, 0],
[32.6596, 15.8221, 9.9745, 6.8630, 4.8322, 3.3248, 2.0960, 1.0154, 0],
])[:, ::-1].T.ravel()
# Test all quadrants; they are symmetrical.
for lon_sign in [1, -1]:
for lat_sign in [1, -1]:
result = gnom.transform_points(geodetic,
lon_sign * lons, lat_sign * lats)
assert_almost_equal(result[:, 0], lon_sign * expected_x, decimal=4)
assert_almost_equal(result[:, 1], lat_sign * expected_y, decimal=4)
def test_sphere_transform():
# USGS Professional Paper 1395, pp 319 - 320
globe = ccrs.Globe(semimajor_axis=1.0, semiminor_axis=1.0,
ellipse=None)
gnom = ccrs.Gnomonic(central_latitude=40.0, central_longitude=-100.0,
globe=globe)
geodetic = gnom.as_geodetic()
other_args = {'a=1.0', 'b=1.0', 'lon_0=-100.0', 'lat_0=40.0'}
check_proj4_params(gnom, other_args)
assert_almost_equal(np.array(gnom.x_limits),
[-5e7, 5e7])
assert_almost_equal(np.array(gnom.y_limits),
[-5e7, 5e7])
result = gnom.transform_point(-110.0, 30.0, geodetic)
assert_almost_equal(result, np.array([-0.1542826, -0.1694739]))
inverse_result = geodetic.transform_point(result[0], result[1], gnom)
assert_almost_equal(inverse_result, [-110.0, 30.0])
| [
"numpy.tile",
"pytest.mark.parametrize",
"numpy.testing.assert_almost_equal",
"numpy.array",
"cartopy.crs.Gnomonic",
"cartopy.crs.Globe"
] | [((1469, 1513), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lat"""', '[-10, 0, 10]'], {}), "('lat', [-10, 0, 10])\n", (1492, 1513), False, 'import pytest\n'), ((1515, 1559), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lon"""', '[-10, 0, 10]'], {}), "('lon', [-10, 0, 10])\n", (1538, 1559), False, 'import pytest\n'), ((1177, 1192), 'cartopy.crs.Gnomonic', 'ccrs.Gnomonic', ([], {}), '()\n', (1190, 1192), True, 'import cartopy.crs as ccrs\n'), ((1606, 1664), 'cartopy.crs.Gnomonic', 'ccrs.Gnomonic', ([], {'central_latitude': 'lat', 'central_longitude': 'lon'}), '(central_latitude=lat, central_longitude=lon)\n', (1619, 1664), True, 'import cartopy.crs as ccrs\n'), ((2062, 2126), 'cartopy.crs.Globe', 'ccrs.Globe', ([], {'ellipse': 'None', 'semimajor_axis': '(1.0)', 'semiminor_axis': '(1.0)'}), '(ellipse=None, semimajor_axis=1.0, semiminor_axis=1.0)\n', (2072, 2126), True, 'import cartopy.crs as ccrs\n'), ((2161, 2187), 'cartopy.crs.Gnomonic', 'ccrs.Gnomonic', ([], {'globe': 'globe'}), '(globe=globe)\n', (2174, 2187), True, 'import cartopy.crs as ccrs\n'), ((2578, 2663), 'numpy.tile', 'np.tile', (['[0.0, 0.1763, 0.364, 0.5774, 0.8391, 1.1918, 1.7321, 2.7475, 5.6713]', '(9)'], {}), '([0.0, 0.1763, 0.364, 0.5774, 0.8391, 1.1918, 1.7321, 2.7475, 5.6713], 9\n )\n', (2585, 2663), True, 'import numpy as np\n'), ((3933, 3997), 'cartopy.crs.Globe', 'ccrs.Globe', ([], {'semimajor_axis': '(1.0)', 'semiminor_axis': '(1.0)', 'ellipse': 'None'}), '(semimajor_axis=1.0, semiminor_axis=1.0, ellipse=None)\n', (3943, 3997), True, 'import cartopy.crs as ccrs\n'), ((4032, 4107), 'cartopy.crs.Gnomonic', 'ccrs.Gnomonic', ([], {'central_latitude': '(40.0)', 'central_longitude': '(-100.0)', 'globe': 'globe'}), '(central_latitude=40.0, central_longitude=-100.0, globe=globe)\n', (4045, 4107), True, 'import cartopy.crs as ccrs\n'), ((4654, 4705), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['inverse_result', '[-110.0, 30.0]'], {}), '(inverse_result, [-110.0, 30.0])\n', (4673, 4705), False, 'from numpy.testing import assert_almost_equal\n'), ((1318, 1341), 'numpy.array', 'np.array', (['gnom.x_limits'], {}), '(gnom.x_limits)\n', (1326, 1341), True, 'import numpy as np\n'), ((1404, 1427), 'numpy.array', 'np.array', (['gnom.y_limits'], {}), '(gnom.y_limits)\n', (1412, 1427), True, 'import numpy as np\n'), ((1830, 1853), 'numpy.array', 'np.array', (['gnom.x_limits'], {}), '(gnom.x_limits)\n', (1838, 1853), True, 'import numpy as np\n'), ((1916, 1939), 'numpy.array', 'np.array', (['gnom.y_limits'], {}), '(gnom.y_limits)\n', (1924, 1939), True, 'import numpy as np\n'), ((2351, 2374), 'numpy.array', 'np.array', (['gnom.x_limits'], {}), '(gnom.x_limits)\n', (2359, 2374), True, 'import numpy as np\n'), ((2437, 2460), 'numpy.array', 'np.array', (['gnom.y_limits'], {}), '(gnom.y_limits)\n', (2445, 2460), True, 'import numpy as np\n'), ((4300, 4323), 'numpy.array', 'np.array', (['gnom.x_limits'], {}), '(gnom.x_limits)\n', (4308, 4323), True, 'import numpy as np\n'), ((4386, 4409), 'numpy.array', 'np.array', (['gnom.y_limits'], {}), '(gnom.y_limits)\n', (4394, 4409), True, 'import numpy as np\n'), ((4539, 4573), 'numpy.array', 'np.array', (['[-0.1542826, -0.1694739]'], {}), '([-0.1542826, -0.1694739])\n', (4547, 4573), True, 'import numpy as np\n'), ((3693, 3760), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result[:, 0]', '(lon_sign * expected_x)'], {'decimal': '(4)'}), '(result[:, 0], lon_sign * expected_x, decimal=4)\n', (3712, 3760), False, 'from numpy.testing 
import assert_almost_equal\n'), ((3773, 3840), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result[:, 1]', '(lat_sign * expected_y)'], {'decimal': '(4)'}), '(result[:, 1], lat_sign * expected_y, decimal=4)\n', (3792, 3840), False, 'from numpy.testing import assert_almost_equal\n'), ((2706, 3369), 'numpy.array', 'np.array', (['[[5.6713, 2.7475, 1.7321, 1.1918, 0.8391, 0.5774, 0.364, 0.1763, 0], [\n 5.7588, 2.7899, 1.7588, 1.2101, 0.852, 0.5863, 0.3696, 0.179, 0], [\n 6.0353, 2.9238, 1.8432, 1.2682, 0.893, 0.6144, 0.3873, 0.1876, 0], [\n 6.5486, 3.1725, 2.0, 1.3761, 0.9689, 0.6667, 0.4203, 0.2036, 0], [\n 7.4033, 3.5866, 2.261, 1.5557, 1.0954, 0.7537, 0.4751, 0.2302, 0], [\n 8.8229, 4.2743, 2.6946, 1.854, 1.3054, 0.8982, 0.5662, 0.2743, 0], [\n 11.3426, 5.495, 3.4641, 2.3835, 1.6782, 1.1547, 0.7279, 0.3527, 0], [\n 16.5817, 8.0331, 5.0642, 3.4845, 2.4534, 1.6881, 1.0642, 0.5155, 0], [\n 32.6596, 15.8221, 9.9745, 6.863, 4.8322, 3.3248, 2.096, 1.0154, 0]]'], {}), '([[5.6713, 2.7475, 1.7321, 1.1918, 0.8391, 0.5774, 0.364, 0.1763, 0\n ], [5.7588, 2.7899, 1.7588, 1.2101, 0.852, 0.5863, 0.3696, 0.179, 0], [\n 6.0353, 2.9238, 1.8432, 1.2682, 0.893, 0.6144, 0.3873, 0.1876, 0], [\n 6.5486, 3.1725, 2.0, 1.3761, 0.9689, 0.6667, 0.4203, 0.2036, 0], [\n 7.4033, 3.5866, 2.261, 1.5557, 1.0954, 0.7537, 0.4751, 0.2302, 0], [\n 8.8229, 4.2743, 2.6946, 1.854, 1.3054, 0.8982, 0.5662, 0.2743, 0], [\n 11.3426, 5.495, 3.4641, 2.3835, 1.6782, 1.1547, 0.7279, 0.3527, 0], [\n 16.5817, 8.0331, 5.0642, 3.4845, 2.4534, 1.6881, 1.0642, 0.5155, 0], [\n 32.6596, 15.8221, 9.9745, 6.863, 4.8322, 3.3248, 2.096, 1.0154, 0]])\n', (2714, 3369), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""An output module that doesn't output anything."""
from plaso.output import interface
from plaso.output import manager
# We don't need to implement any functionality here, so the methods in
# the interface don't need to be overwritten.
# pylint: disable=abstract-method
class NullOutputModule(interface.OutputModule):
"""An output module that doesn't output anything."""
NAME = u'null'
DESCRIPTION = u'An output module that doesn\'t output anything.'
manager.OutputManager.RegisterOutput(NullOutputModule)
| [
"plaso.output.manager.OutputManager.RegisterOutput"
] | [((488, 542), 'plaso.output.manager.OutputManager.RegisterOutput', 'manager.OutputManager.RegisterOutput', (['NullOutputModule'], {}), '(NullOutputModule)\n', (524, 542), False, 'from plaso.output import manager\n')] |
import time
import os
import hashlib
from collections import Counter
def profiler(method):
def wrapper_method(*arg, **kw):
t = time.time()
method(*arg, **kw)
print('Method ' + method.__name__ + ' took : ' +
"{:2.5f}".format(time.time()-t) + ' sec')
return wrapper_method
@profiler
def part1():
inp = 'abbhdwsy'
code = []
inc = 1
while len(code) < 8:
tmp = inp + str(inc)
hash = hashlib.md5(bytes(tmp, 'utf-8')).hexdigest()
if hash[0] == hash[1] == hash[2] == hash[3] == hash[4] == '0':
code.append(hash[5])
inc += 1
print(''.join(code))
@profiler
def part2():
inp = 'abbhdwsy'
code = ['-'] * 8
inc = 1
while code.count('-') > 0:
tmp = inp + str(inc)
hash = hashlib.md5(bytes(tmp, 'utf-8')).hexdigest()
if hash[0] == hash[1] == hash[2] == hash[3] == hash[4] == '0':
if int(hash[5],16) < 8:
if code[int(hash[5],16)] == '-':
code[int(hash[5],16)] = hash[6]
inc += 1
print(''.join(code))
if __name__ == "__main__":
part1()
part2()
| [
"time.time"
] | [((141, 152), 'time.time', 'time.time', ([], {}), '()\n', (150, 152), False, 'import time\n'), ((268, 279), 'time.time', 'time.time', ([], {}), '()\n', (277, 279), False, 'import time\n')] |
from sanic import Blueprint
from server.api.app import app_api
from server.api.device import device_api
api = Blueprint.group(
app_api,
device_api
)
| [
"sanic.Blueprint.group"
] | [((111, 147), 'sanic.Blueprint.group', 'Blueprint.group', (['app_api', 'device_api'], {}), '(app_api, device_api)\n', (126, 147), False, 'from sanic import Blueprint\n')] |
"""Test accuracy of the level 4 mixture modules.
This module provides tests of the accuracy of the level 4 mixture
modules. The prefixes of these modules list the substances in
equilibrium; ``liqvap4`` is for pure liquid water and pure water vapour,
whereas ``iceair4b`` is for ice and humid air. It also includes one
level 5 module, ``iceflu5``.
This module can also be called from the command line as::
python testmix.py arg1 arg2 ...
The arguments can be module names, parts of module names, or substance
prefixes. This will run all available tests for modules that contain the
arguments in their name. For example, ``iceair4`` will test the three
modules ``iceair4a``, ``iceair4b``, ``iceair4c``; ``sea`` will test all
modules for which seawater is a component. If no arguments are given,
all available tests are run.
The functions provided by this module generate the tests for the mixture
module of the same name. Each function returns a tuple of
:class:`~teospy.tests.tester.Tester` instances which include the
functions checked, values of the arguments, and tables of reference
values. Use the ``run`` method of a Tester to run the test, and
``printresults`` to print a summary.
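For example, the following minimal sketch (assuming the package is importable
as ``teospy.tests.testmix``) runs the pure-water tests and prints summaries::
    from teospy.tests import testmix
    for test in testmix.genliqvap4():
        test.run()
        test.printresults()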
:Available modules to test:
* :mod:`~teospy.liqvap4`
* :mod:`~teospy.iceliq4`
* :mod:`~teospy.icevap4`
* :mod:`~teospy.iceair4a`
* :mod:`~teospy.iceair4b`
* :mod:`~teospy.iceair4c`
* :mod:`~teospy.liqair4a`
* :mod:`~teospy.liqair4b`
* :mod:`~teospy.liqair4c`
* :mod:`~teospy.liqiceair4`
* :mod:`~teospy.sealiq4`
* :mod:`~teospy.seavap4`
* :mod:`~teospy.seaice4`
* :mod:`~teospy.seaicevap4`
* :mod:`~teospy.seaair4`
* :mod:`~teospy.iceflu5`
"""
__all__ = ['genliqvap4','geniceliq4','genicevap4','geniceair4a','geniceair4b',
'geniceair4c','genliqair4a','genliqair4b','genliqair4c','genliqiceair4',
'gensealiq4','genseavap4','genseaice4','genseaicevap4','genseaair4',
'geniceflu5']
import warnings
from teospy.tests.tester import Tester, _DERS3
## Generating Tester instances
def genliqvap4():
"""Generate liqvap4 Testers.
"""
from teospy import liqvap4
funs = [liqvap4.pressure,liqvap4.densityvap,liqvap4.densityliq,
liqvap4.chempot,liqvap4.entropyliq,liqvap4.entropyvap,
liqvap4.enthalpyliq,liqvap4.enthalpyvap,liqvap4.volumeevap,
liqvap4.entropyevap,liqvap4.enthalpyevap]
fargs = tuple()
fkwargs = {'temp': 300.}
refs = [3536.80675227,2.55896736829e-2,996.513027468,-5361.84908682,
393.089029801,8517.38650061,112564.859854,2549854.10109,39.0772595686,
8124.29747080,2437289.24124]
fnames = ['pressure','densityvap','densityliq','chempot','entropyliq',
'entropyvap','enthalpyliq','enthalpyvap','volumeevap','entropyevap',
'enthalpyevap']
argfmt = '({0:s}={1:3g})'
header = 'Liquid-vapour equilibrium at temp'
eqfun = liqvap4.eq_tp
eqargs = fargs
eqkwargs = fkwargs
    eqkeys = ['temp','pres','dvap','dliq']
test_t = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=eqargs,eqkwargs=eqkwargs,
eqkeys=eqkeys)
funs = [liqvap4.temperature,liqvap4.densityvap,liqvap4.densityliq,
liqvap4.chempot,liqvap4.entropyliq,liqvap4.entropyvap,
liqvap4.enthalpyliq,liqvap4.enthalpyvap,liqvap4.volumeevap,
liqvap4.entropyevap,liqvap4.enthalpyevap]
fargs = tuple()
fkwargs = {'pres': 1e4}
refs = [318.956328924,6.81657223094e-2,989.833275365,-15259.1024273,
649.195605196,8148.82019425,191805.944559,2583858.67179,14.6691196141,
7499.62458905,2392052.72723]
fnames = ['temperature','densityvap','densityliq','chempot','entropyliq',
'entropyvap','enthalpyliq','enthalpyvap','volumeevap','entropyevap',
'enthalpyevap']
argfmt = '({0:s}={1:5g})'
header = 'Liquid-vapour equilibrium at pres'
eqfun = liqvap4.eq_tp
eqargs = fargs
eqkwargs = fkwargs
eqkeys = ['temp','pres','dvap','dliq']
test_p = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=eqargs,eqkwargs=eqkwargs,
eqkeys=eqkeys)
return (test_t, test_p)
def geniceliq4():
"""Generate iceliq4 Testers.
"""
from teospy import iceliq4
funs = [iceliq4.pressure,iceliq4.densityliq,iceliq4.chempot,
iceliq4.densityice,iceliq4.enthalpyice,iceliq4.enthalpyliq,
iceliq4.enthalpymelt,iceliq4.entropyice,iceliq4.entropyliq,
iceliq4.entropymelt,iceliq4.volumemelt]
fargs = tuple()
fkwargs = {'temp': 270.}
refs = [39313338.8825,1019.05568894,38870.0605192,921.359428514,
-299055.938629,26110.7481094,325166.686739,-1251.57777462,
-47.2567126291,1204.32106199,-1.04052121182e-4]
fnames = ['pressure','densityliq','chempot','densityice','enthalpyice',
'enthalpyliq','enthalpymelt','entropyice','entropyliq','entropymelt',
'volumemelt']
argfmt = '({0:s}={1:3g})'
header = 'Ice-liquid equilibrium at temp'
eqfun = iceliq4.eq_tp
eqargs = fargs
eqkwargs = fkwargs
eqkeys = ['temp','pres','dliq']
test_t = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=eqargs,eqkwargs=eqkwargs,
eqkeys=eqkeys)
funs = [iceliq4.temperature,iceliq4.densityliq,iceliq4.chempot,
iceliq4.densityice,iceliq4.enthalpyice,iceliq4.enthalpyliq,
iceliq4.enthalpymelt,iceliq4.entropyice,iceliq4.entropyliq,
iceliq4.entropymelt,iceliq4.volumemelt]
fargs = tuple()
fkwargs = {'pres': 1e7}
refs = [272.401648869,1004.79353660,9972.88171381,917.896690831,
-324602.983812,6945.92700483,331548.910817,-1228.24464139,
-11.1121012723,1217.13254011,-9.42178903288e-5]
fnames = ['temperature','densityliq','chempot','densityice','enthalpyice',
'enthalpyliq','enthalpymelt','entropyice','entropyliq','entropymelt',
'volumemelt']
argfmt = '({0:s}={1:5g})'
header = 'Ice-liquid equilibrium at pres'
eqfun = iceliq4.eq_tp
eqargs = fargs
eqkwargs = fkwargs
eqkeys = ['temp','pres','dliq']
test_p = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=eqargs,eqkwargs=eqkwargs,
eqkeys=eqkeys)
return (test_t,test_p)
def genicevap4():
"""Generate icevap4 Testers.
"""
from teospy import icevap4
funs = [icevap4.pressure,icevap4.densityvap,icevap4.chempot,
icevap4.densityice,icevap4.enthalpyice,icevap4.enthalpyvap,
icevap4.entropyice,icevap4.entropyvap,icevap4.volumesubl,
icevap4.entropysubl,icevap4.enthalpysubl]
fargs = tuple()
fkwargs = {'temp': 270.}
refs = [470.059067981,3.77406140772e-3,-3895.26747392,917.170465733,
-340033.434649,2495132.21977,-1244.95617472,9255.65736018,264.965451558,
10500.6135349,2835165.65442]
fnames = ['pressure','densityvap','chempot','densityice','enthalpyice',
'enthalpyvap','entropyice','entropyvap','volumesubl','entropysubl',
'enthalpysubl']
argfmt = '({0:s}={1:3g})'
header = 'Ice-vapour equilibrium at temp'
eqfun = icevap4.eq_tp
eqargs = fargs
eqkwargs = fkwargs
eqkeys = ['temp','pres','dvap']
test_t = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=eqargs,eqkwargs=eqkwargs,
eqkeys=eqkeys)
funs = [icevap4.temperature,icevap4.densityvap,icevap4.chempot,
icevap4.densityice,icevap4.enthalpyice,icevap4.enthalpyvap,
icevap4.entropyice,icevap4.entropyvap,icevap4.volumesubl,
icevap4.entropysubl,icevap4.enthalpysubl]
fargs = tuple()
fkwargs = {'pres': 100.}
refs = [252.817910215,8.57185487853e-4,-26421.2820403,919.600269745,
-374576.247867,2463525.19629,-1377.09771247,9848.77406912,1166.60755699,
11225.8717816,2838101.44416]
fnames = ['temperature','densityvap','chempot','densityice','enthalpyice',
'enthalpyvap','entropyice','entropyvap','volumesubl','entropysubl',
'enthalpysubl']
argfmt = '({0:s}={1:3g})'
header = 'Ice-vapour equilibrium at pres'
eqfun = icevap4.eq_tp
eqargs = fargs
eqkwargs = fkwargs
eqkeys = ['temp','pres','dvap']
test_p = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=eqargs,eqkwargs=eqkwargs,
eqkeys=eqkeys)
return (test_t, test_p)
def geniceair4a():
"""Generate iceair4a Testers.
"""
from teospy import iceair4a
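    # Icy air equilibria at (temp,pres), (airf,temp), (airf,pres), and (airf,entr),
    # plus condensation pressure, frost point, dry fraction, sublimation pressure,
    # ICL functions, and relative-humidity conversions.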
funs = [iceair4a.enthalpysubl,iceair4a.densityair,iceair4a.densityvap,
iceair4a.densityice]
fargs = tuple()
fkwargs = {'temp': 270., 'pres': 1e5}
refs = [2833397.47158,1.28880078014,3.79055033080e-3,917.181167192]
refs_alt = [2833359.27614,None,None,None]
fnames = ['enthalpysubl','densityair','densityvap','densityice']
argfmt = '({0:s}={1:3g},{2:s}={3:6g})'
header = 'Icy air at temp and pres'
eqfun = iceair4a.eq_atpe
eqargs = fargs
eqkwargs = fkwargs
eqkeys = ['airf','temp','pres','dhum']
test_tp = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=eqargs,eqkwargs=eqkwargs,
eqkeys=eqkeys,refs_alt=refs_alt)
funs = [iceair4a.pressure,iceair4a.enthalpysubl,iceair4a.densityair,
iceair4a.densityvap,iceair4a.densityice]
fkwargs = {'airf': 0.997, 'temp': 270.}
refs = [98034.4511233,2833421.40560,1.26340801028,3.79022403085e-3,
917.180955861]
refs_alt = [None,2833386.54980,None,None,None]
fnames = ['pressure','enthalpysubl','densityair','densityvap','densityice']
argfmt = '({0:s}={1:5.3f},{2:s}={3:3g})'
header = 'Icy air at airf and temp'
eqkwargs = fkwargs
test_at = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=eqargs,eqkwargs=eqkwargs,
eqkeys=eqkeys,refs_alt=refs_alt)
funs = [iceair4a.temperature,iceair4a.enthalpysubl,iceair4a.densityair,
iceair4a.densityvap,iceair4a.densityice]
fkwargs = {'airf': 0.997, 'pres': 1e5}
refs = [270.234816126,2833334.55263,1.28763121402,3.86289364206e-3,
917.147060527]
refs_alt = [270.232024746,2833296.51317,None,None,None]
fnames = ['temperature','enthalpysubl','densityair','densityvap',
'densityice']
argfmt = '({0:s}={1:5.3f},{2:s}={3:6g})'
    header = 'Icy air at airf and pres'
eqkwargs = fkwargs
test_ap = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=eqargs,eqkwargs=eqkwargs,
eqkeys=eqkeys,refs_alt=refs_alt)
funs = [iceair4a.enthalpysubl,iceair4a.densityair,iceair4a.densityvap,
iceair4a.pressure,iceair4a.temperature,iceair4a.densityice]
fkwargs = {'airf': 0.997, 'entr': 100.}
refs = [2834605.61524,0.949325026119,2.84797507836e-3,72721.4579415,
266.514349350,917.681749114]
refs_alt = [2834612.42351,None,None,None,None,None]
fnames = ['enthalpysubl','densityair','densityvap','pressure','temperature',
'densityice']
argfmt = '({0:s}={1:5.3f},{2:s}={3:3g})'
header = 'Icy air at airf and entr'
eqkwargs = fkwargs
test_ae = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=eqargs,eqkwargs=eqkwargs,
eqkeys=eqkeys,refs_alt=refs_alt)
funs = iceair4a.condensationpressure
fargs = (0.997,270.)
refs = 98034.4511233
fnames = 'condensationpressure'
argfmt = '({0:5.3f},{1:3g})'
header = 'Condensation pressure'
test_cp = Tester(funs,fargs,refs,fnames,argfmt,header=header)
funs = iceair4a.frostpoint
fargs = (0.997,1e5)
refs = 270.234816126
fnames = 'frostpoint'
argfmt = '({0:5.3f},{1:6g})'
header = 'Frost point'
test_fp = Tester(funs,fargs,refs,fnames,argfmt,header=header)
funs = iceair4a.massfractionair
fargs = (270.,1e5)
refs = 0.997058854720
fnames = 'massfractionair'
argfmt = '({0:3g},{1:6g})'
header = 'Dry fraction'
test_mf = Tester(funs,fargs,refs,fnames,argfmt,header=header)
funs = iceair4a.sublimationpressure
fargs = (270.,1e5)
refs = 472.041823975
fnames = 'sublimationpressure'
argfmt = '({0:3g},{1:6g})'
header = 'Sublimation pressure'
test_sp = Tester(funs,fargs,refs,fnames,argfmt,header=header)
funs = [iceair4a.ict,iceair4a.icl]
fargs = (0.997,300.,1e5)
refs = [265.224998411,64988.3931838]
fnames = ['ict','icl']
argfmt = '({0:5.3f},{1:3g},{2:6g})'
header = 'ICL functions'
eqfun = iceair4a.eq_icl
eqargs = fargs
eqkeys = ['dhum','ticl','picl','dhicl']
test_icl = Tester(funs,fargs,refs,fnames,argfmt,header=header,
eqfun=eqfun,eqargs=eqargs,eqkeys=eqkeys)
funs = [iceair4a.rhfromairf_cct,iceair4a.rhfromairf_wmo]
fargs = (0.998,270.,1e5)
refs = [0.680395740553,0.679365943331]
fnames = ['rhfromairf_cct','rhfromairf_wmo']
argfmt = '({0:5.3f},{1:3g},{2:6g})'
header = 'RH from airf'
test_rh1 = Tester(funs,fargs,refs,fnames,argfmt,header=header)
funs = [iceair4a.airffromrh_cct,iceair4a.airffromrh_wmo]
fargs = (0.8,270.,1e5)
refs = [0.997647924743,0.997645698908]
fnames = ['airffromrh_cct','airffromrh_wmo']
argfmt = '({0:3.1f},{1:3g},{2:6g})'
header = 'airf from RH'
test_rh2 = Tester(funs,fargs,refs,fnames,argfmt,header=header)
return (test_tp,test_at,test_ap,test_ae,test_cp,test_fp,test_mf,test_sp,
test_icl,test_rh1,test_rh2)
def geniceair4b():
"""Generate iceair4b Testers.
"""
from teospy import iceair4b
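    # Icy air Gibbs energy: derivative table plus derived properties (cp, density, etc.).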
funs = iceair4b.iceair_g
args1 = (0.5,270.,1e5)
fargs = [(der+args1) for der in _DERS3]
refs = [-2595.57166634,2382.35592988,610.264516719,0.389645501224,0.0,
-1269.41767669,0.777110408175,-7.01196501618,1.60141530565e-3,
-3.91183988658e-6]
refs_alt = [-2595.57166634,None,610.264515318,None,None,-1269.41767949,None,
-7.00810930740,1.60095965101e-3,-3.91178603885e-6]
fnames = 'iceair_g'
argfmt = '({0:1d},{1:1d},{2:1d},{3:3.1f},{4:3g},{5:6g})'
header = 'Icy air Gibbs derivatives'
eqfun = iceair4b._eq_atpe
eqargs = tuple()
eqkwargs = {'temp': 270., 'pres': 1e5}
eqkeys = ['airf','temp','pres','dhum']
keepkeys = ['airf','dhum']
test_ders = Tester(funs,fargs,refs,fnames,argfmt,header=header,
eqfun=eqfun,eqargs=eqargs,eqkwargs=eqkwargs,eqkeys=eqkeys,
keepkeys=keepkeys,refs_alt=refs_alt)
funs = [iceair4b.solidfraction,iceair4b.vapourfraction,iceair4b.cp,
iceair4b.density,iceair4b.enthalpy,iceair4b.entropy,iceair4b.expansion,
iceair4b.kappa_t,iceair4b.lapserate]
fargs = args1
refs = [0.498525089434,1.47491056602e-3,1893.23055437,2.5664353800,
-167366.990802,-610.264515318,4.10992890025e-3,1.00394842901e-5,
2.28383245895e-4]
refs_alt = [None,None,1892.18951300,None,None,None,4.10875949031e-3,
1.00393460891e-5,2.28443875629e-4]
fnames = ['solidfraction','vapourfraction','cp','density','enthalpy',
'entropy','expansion','kappa_t','lapserate']
argfmt = '({0:3.1f},{1:3g},{2:6g})'
header = 'Icy air Gibbs functions'
test_funs = Tester(funs,fargs,refs,fnames,argfmt,header=header,
eqfun=eqfun,eqargs=eqargs,eqkwargs=eqkwargs,eqkeys=eqkeys,
keepkeys=keepkeys,refs_alt=refs_alt)
return (test_ders, test_funs)
def geniceair4c():
"""Generate iceair4c Testers.
"""
from teospy import iceair4c
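    # Icy air enthalpy: derivative table, enthalpy-based properties, and potential functions.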
funs = iceair4c.iceair_h
args1 = (0.5,1e5)
fargs = [(der+args1) for der in _DERS3]
fkwargs = {'entr': -600.}
refs = [-164588.106002,543.016647638,271.449994437,0.391981878510,
224806.061923,-177.336808301,0.494222328196,0.139890994527,
2.26840108532e-4,-3.56982287084e-6]
refs_alt = [None,543.016638396,None,None,224958.525864,-177.457078495,
0.781782661019,0.139985868894,2.26912930199e-4,-3.56976697603e-6]
fnames = 'iceair_h'
argfmt = '({0:1d},{1:1d},{2:1d},{3:3g},{4:6g},{5:s}={6:4g})'
header = 'Icy air enthalpy derivatives'
eqfun = iceair4c.eq_wpte
eqargs = args1
eqkwargs = fkwargs
eqkeys = ['airf','temp','dhum']
test_der = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=eqargs,eqkwargs=eqkwargs,
eqkeys=eqkeys,refs_alt=refs_alt)
funs = [iceair4c.temperature,iceair4c.lapserate,iceair4c.cp,
iceair4c.kappa_s,iceair4c.density]
fargs = (0.9,1e5)
fkwargs = {'entr': -100.}
refs = [270.383680133,4.42092568190e-4,1768.51439675,8.23141751514e-6,
1.42531895986]
refs_alt = [270.383680119,4.42457786755e-4,1766.52051488,8.23031581047e-6,
1.42531895993]
fnames = ['temperature','lapserate','cp','kappa_s','density']
argfmt = '({0:3g},{1:6g},{2:s}={3:4g})'
header = 'Icy air enthalpy functions'
eqargs = fargs
eqkwargs = fkwargs
test_fun = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=eqargs,eqkwargs=eqkwargs,
eqkeys=eqkeys,refs_alt=refs_alt)
funs = [iceair4c.pottemp,iceair4c.potdensity,iceair4c.potenthalpy]
fargs = (0.9,230.,5e4,1e5)
refs = [266.105208871,1.45048110422,-35781.2564451]
fnames = ['pottemp','potdensity','potenthalpy']
argfmt = '({0:3g},{1:3g},{2:5g},{3:6g})'
header = 'Icy air potential functions'
eqfun = iceair4c.eq_pot
eqargs = fargs
eqkeys = ['airf','dhum','apot','tpot','dhpot']
test_pot = Tester(funs,fargs,refs,fnames,argfmt,header=header,eqfun=eqfun,
eqargs=eqargs,eqkeys=eqkeys)
return (test_der, test_fun, test_pot)
def genliqair4a():
"""Generate liqair4a Testers.
"""
from teospy import liqair4a
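    # Wet air equilibria at (temp,pres), (airf,pres), (airf,temp), and (airf,entr),
    # plus condensation pressure, dry fraction, dew point, ICL functions, and
    # relative-humidity conversions.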
funs = [liqair4a.enthalpyevap,liqair4a.entropy,liqair4a.densityair,
liqair4a.densityvap,liqair4a.densityliq]
fargs = tuple()
fkwargs = {'temp': 300., 'pres': 1e5}
refs = [2434606.28954,296.711483507,1.14614215827,2.56669393257e-2,
996.556340389]
refs_alt = [2434585.53919,None,None,None,None]
fnames = ['enthalpyevap','entropy','densityair','densityvap','densityliq']
argfmt = '({0:s}={1:3g},{2:s}={3:6g})'
header = 'Wet air at temp and pres'
eqfun = liqair4a.eq_atpe
eqargs = fargs
eqkwargs = fkwargs
eqkeys = ['airf','temp','pres','dhum','dliq']
test_tp = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys,
refs_alt=refs_alt)
funs = [liqair4a.enthalpyevap,liqair4a.entropy,liqair4a.temperature,
liqair4a.densityair,liqair4a.densityvap,liqair4a.densityliq]
fkwargs = {'airf': 0.99, 'pres': 1e5}
refs = [2465683.43515,145.863545194,287.078299795,1.20675806022,
0.0120675806022,999.256685197]
refs_alt = [2465656.38630,None,None,None,None,None]
fnames = ['enthalpyevap','entropy','temperature','densityair','densityvap',
'densityliq']
argfmt = '({0:s}={1:4g},{2:s}={3:6g})'
header = 'Wet air at airf and pres'
test_ap = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys,
refs_alt=refs_alt)
funs = [liqair4a.enthalpyevap,liqair4a.entropy,liqair4a.pressure,
liqair4a.densityair,liqair4a.densityvap,liqair4a.densityliq]
fkwargs = {'airf': 0.99, 'temp': 300.}
refs = [2433303.92095,-41.9991507402,223057.741750,2.57657653270,
2.57657653270e-2,996.611581662]
refs_alt = [2433111.29696,None,None,None,None,None]
    fnames = ['enthalpyevap','entropy','pressure','densityair','densityvap',
        'densityliq']
argfmt = '({0:s}={1:4g},{2:s}={3:3g})'
header = 'Wet air at airf and temp'
test_at = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys,
refs_alt=refs_alt)
funs = [liqair4a.enthalpyevap,liqair4a.temperature,liqair4a.pressure,
liqair4a.densityair,liqair4a.densityvap,liqair4a.densityliq]
fkwargs = {'airf': 0.99, 'entr': 100.}
refs = [2458179.89837,290.107386673,121546.373652,1.45154665083,
1.45154665083e-2,998.794738784]
refs_alt = [2458121.74961,None,None,None,None,None]
fnames = ['enthalpyevap','temperature','pressure','densityair','densityvap',
'densityliq']
argfmt = '({0:s}={1:4g},{2:s}={3:3g})'
header = 'Wet air at airf and entr'
test_ae = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys,
refs_alt=refs_alt)
funs = liqair4a.condensationpressure
fargs = (0.9,300.)
refs = 23381.2332935
fnames = 'condensationpressure'
argfmt = '({0:3g},{1:3g})'
header = 'Wet air condensation pressure'
test_cp = Tester(funs,fargs,refs,fnames,argfmt,header=header)
funs = liqair4a.massfractionair
fargs = (300.,1e5)
refs = 0.977605797727
fnames = 'massfractionair'
argfmt = '({0:3g},{1:6g})'
header = 'Wet air dry fraction'
test_mf = Tester(funs,fargs,refs,fnames,argfmt,header=header)
funs = liqair4a.dewpoint
fargs = (0.99,1e5)
refs = 287.078299795
fnames = 'dewpoint'
argfmt = '({0:4g},{1:6g})'
header = 'Wet air dew point'
test_dp = Tester(funs,fargs,refs,fnames,argfmt,header=header)
funs = [liqair4a.ict,liqair4a.icl]
fargs = (0.99,300.,1e5)
refs = [284.200207629,82723.6047631]
fnames = ['ict','icl']
argfmt = '({0:4g},{1:3g},{2:6g})'
header = 'Wet air ICL functions'
eqfun = liqair4a.eq_icl
eqkeys = ['dhum','ticl','dhicl','dlicl']
test_icl = Tester(funs,fargs,refs,fnames,argfmt,header=header,
eqfun=eqfun,eqargs=fargs,eqkeys=eqkeys)
funs = [liqair4a.rhfromairf_cct,liqair4a.rhfromairf_wmo]
fargs = (0.99,300.,1e5)
refs = [0.449887886959,0.440953686019]
fnames = ['rhfromairf_cct','rhfromairf_wmo']
argfmt = '({0:4g},{1:3g},{2:6g})'
header = 'Wet air RH from airf'
test_rh1 = Tester(funs,fargs,refs,fnames,argfmt,header=header)
funs = [liqair4a.airffromrh_cct,liqair4a.airffromrh_wmo]
fargs = (0.8,300.,1e5)
refs = [0.982133277948,0.982004037135]
fnames = ['airffromrh_cct','airffromrh_wmo']
argfmt = '({0:3g},{1:3g},{2:6g})'
header = 'Wet air airf from RH'
test_rh2 = Tester(funs,fargs,refs,fnames,argfmt,header=header)
return (test_tp,test_ap,test_at,test_ae,test_cp,test_mf,test_dp,test_icl,
test_rh1,test_rh2)
def genliqair4b():
"""Generate liqair4b Testers.
"""
from teospy import liqair4b
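    # Wet air Gibbs energy: derivative table plus derived properties.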
funs = liqair4b.liqair_g
args1 = (0.5,300.,1e5)
fargs = [(der+args1) for der in _DERS3]
refs = [-5396.77820137,-263.455491203,-343.783393872,0.446729465555,0.,
98.5580798842,0.891452019991,-14.2265223683,2.45335972867e-3,
-4.62725155875e-6]
refs_alt = [None,None,None,None,None,None,None,-14.0995955397,
2.43183979422e-3,-4.62360294023e-6]
fnames = 'liqair_g'
argfmt = '({0:1d},{1:1d},{2:1d},{3:3g},{4:3g},{5:6g})'
header = 'Wet air Gibbs derivatives'
eqfun = liqair4b._eq_atpe
eqargs = tuple()
eqkwargs = {'temp': 300., 'pres': 1e5}
eqkeys = ['airf','temp','pres','dhum','dliq']
keepkeys = ['airf','dhum','dliq']
test_der = Tester(funs,fargs,refs,fnames,argfmt,header=header,
eqfun=eqfun,eqargs=eqargs,eqkwargs=eqkwargs,eqkeys=eqkeys,
keepkeys=keepkeys,refs_alt=refs_alt)
funs = [liqair4b.cp,liqair4b.density,liqair4b.enthalpy,liqair4b.entropy,
liqair4b.expansion,liqair4b.kappa_t,liqair4b.lapserate,
liqair4b.liquidfraction,liqair4b.vapourfraction]
fargs = args1
refs = [4267.95671050,2.23849125053,97738.2399604,343.783393872,
5.49182428703e-3,1.03580621283e-5,1.72449715057e-4,0.488546404734,
1.14535952655e-2]
refs_alt = [4229.87866191,None,None,None,5.44365210207e-3,1.03498947276e-5,
1.72475854884e-4,None,None]
fnames = ['cp','density','enthalpy','entropy','expansion','kappa_t',
'lapserate','liquidfraction','vapourfraction']
argfmt = '({0:3g},{1:3g},{2:6g})'
header = 'Wet air Gibbs functions'
test_fun = Tester(funs,fargs,refs,fnames,argfmt,header=header,
eqfun=eqfun,eqargs=eqargs,eqkwargs=eqkwargs,eqkeys=eqkeys,
keepkeys=keepkeys,refs_alt=refs_alt)
return (test_der, test_fun)
def genliqair4c():
"""Generate liqair4c Testers.
"""
from teospy import liqair4c
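    # Wet air enthalpy: derivative table, enthalpy-based properties, and potential functions.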
funs = liqair4c.liqair_h
args1 = (0.5,1e5)
fargs = [der+args1 for der in _DERS3]
fkwargs = {'entr': 100.}
refs = [26898.5215492,-1681.79366113,280.393544899,0.406872930019,
35.7288882498,1.78399786454,0.814851029626,0.0890777333582,
1.550566407026e-4,-3.83770233859e-6]
refs_alt = [None,None,None,None,35.7689708915,1.78599925196,0.811745643965,
8.91776656830e-2,1.55067379031e-4,-3.83770118470e-6]
fnames = 'liqair_h'
argfmt = '({0:1d},{1:1d},{2:1d},{3:3g},{4:6g},{5:s}={6:3g})'
header = 'Wet air enthalpy derivatives'
eqfun = liqair4c.eq_wpte
eqargs = args1
eqkwargs = fkwargs
eqkeys = ['airf','temp','dhum','dliq']
test_der = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=eqargs,eqkwargs=eqkwargs,
eqkeys=eqkeys,refs_alt=refs_alt)
funs = [liqair4c.temperature,liqair4c.lapserate,liqair4c.cp,
liqair4c.kappa_s,liqair4c.density]
fargs = args1
refs = [280.393544899,1.55056640703e-4,3147.74000558,9.43218891071e-6,
2.45776980040]
refs_alt = [None,1.55067379031e-4,3144.21265404,9.43218607469e-6,None]
fnames = ['temperature','lapserate','cp','kappa_s','density']
argfmt = '({0:3g},{1:6g},{2:s}={3:3g})'
header = 'Wet air enthalpy functions'
test_fun = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=eqargs,eqkwargs=eqkwargs,
eqkeys=eqkeys,refs_alt=refs_alt)
funs = [liqair4c.pottemp,liqair4c.potdensity,liqair4c.potenthalpy]
fargs = (0.5,300.,1e4,1e5)
refs = [348.222379217,1.22550664945,655155.797982]
fnames = ['pottemp','potdensity','potenthalpy']
argfmt = '({0:3g},{1:3g},{2:5g},{3:6g})'
header = 'Wet air potential functions'
eqfun = liqair4c.eq_pot
eqargs = fargs
eqkeys = ['airf','dhum','dliq','apot','tpot','dhpot','dlpot']
test_pot = Tester(funs,fargs,refs,fnames,argfmt,header=header,eqfun=eqfun,
eqargs=eqargs,eqkeys=eqkeys)
return (test_der, test_fun, test_pot)
def genliqiceair4():
"""Generate liqiceair4 Testers.
"""
from teospy import liqiceair4
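    # Wet-icy air (vapour, liquid, and ice in humid air): equilibria at fixed airf,
    # temp, or pres, at given mass fractions, and the isentropic freezing/melting levels.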
funs = [liqiceair4.pressure,liqiceair4.temperature]
fargs = tuple()
fkwargs = {'airf': 0.99}
refs = [38338.9622424,273.157198087]
fnames = ['pressure','temperature']
argfmt = '({0:s}={1:4g})'
header = 'Wet-icy air at airf'
eqfun = liqiceair4.eq_atp
eqkeys = ['airf','temp','pres','<KEY>']
test_a = Tester(funs,fargs,refs,fnames,argfmt,header=header,fkwargs=fkwargs,
eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys)
funs = [liqiceair4.pressure,liqiceair4.airfraction]
fkwargs = {'temp': 273.155}
refs = [67931.6010764,0.994366063923]
fnames = ['pressure','airfraction']
argfmt = '({0:s}={1:7.3f})'
header = 'Wet-icy air at temp'
test_t = Tester(funs,fargs,refs,fnames,argfmt,header=header,fkwargs=fkwargs,
eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys)
funs = [liqiceair4.airfraction,liqiceair4.temperature]
fkwargs = {'pres': 1e4}
refs = [0.961024307544,273.159302793]
fnames = ['airfraction','temperature']
argfmt = '({0:s}={1:5g})'
header = 'Wet-icy air at pres'
test_p = Tester(funs,fargs,refs,fnames,argfmt,header=header,fkwargs=fkwargs,
eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys)
funs = [liqiceair4.entropy,liqiceair4.enthalpy,liqiceair4.density,
liqiceair4.pressure,liqiceair4.temperature]
fargs = (0.1,)
fkwargs = {'wliq': 0.2, 'wice': 0.3}
refs = [3496.16306903,900361.135280,0.012136403756794166,706.817425301,
273.159992933]
refs_alt = [None,None,474.974398769,None,None]
fnames = ['entropy','enthalpy','density','pressure','temperature']
argfmt = '({0:3g},{1:s}={2:3g},{3:s}={4:3g})'
header = 'Wet-icy air at (wair,wliq,wice)'
eqfun = liqiceair4.eq_wefli
    test_wli = Tester(funs,fargs,refs,fnames,argfmt,header=header,
        fkwargs=fkwargs,eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys,
        refs_alt=refs_alt)
funs = [liqiceair4.enthalpy,liqiceair4.density,liqiceair4.pressure,
liqiceair4.airfraction,liqiceair4.solidfraction,
liqiceair4.liquidfraction,liqiceair4.vapourfraction,
liqiceair4.temperature]
fargs = (0.99,)
fkwargs = {'entr': 0., 'wetf': 0.5}
refs = [7356.12943724,1.436115286795335,112016.075795,0.996583352944,
3.30296152581e-3,3.30296152581e-3,3.39407694837e-3,273.151724970]
refs_alt = [None,7.74757979404,None,None,None,None,None,None]
fnames = ['enthalpy','density','pressure','airfraction','solidfraction',
'liquidfraction','vapourfraction','temperature']
argfmt = '({0:4g},{1:s}={2:2g},{3:s}={4:3g})'
header = 'Wet-icy air at (wair,entr,wetf)'
    test_wef = Tester(funs,fargs,refs,fnames,argfmt,header=header,
        fkwargs=fkwargs,eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys,
        refs_alt=refs_alt)
funs = [liqiceair4.ifl,liqiceair4.iml]
fargs = (.99,100.)
refs = [83234.7314360,81605.5557728]
fnames = ['ifl','iml']
argfmt = '({0:3g},{1:3g})'
header = 'Wet-icy air isentropic levels'
test_ifml = Tester(funs,fargs,refs,fnames,argfmt,header=header)
return (test_a, test_t, test_p, test_wli, test_wef, test_ifml)
def gensealiq4():
"""Generate sealiq4 Testers.
"""
from teospy import sealiq4
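    # Seawater-pure liquid water equilibrium: osmotic pressure only.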
funs = sealiq4.osmoticpressure
fargs = (0.035,300.,1e5)
refs = 2594603.20968
fnames = 'osmoticpressure'
argfmt = '({0:5.3f},{1:3g},{2:6g})'
header = 'Seawater-pure liquid equilibrium'
test = Tester(funs,fargs,refs,fnames,argfmt,header=header)
return (test,)
def genseavap4():
"""Generate seavap4 Testers.
"""
from teospy import seavap4
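    # Seawater-vapour equilibrium: boiling temperature, vapour pressure, brine salinity,
    # two-phase properties at (salt,pres), (salt,temp), and (temp,pres), and the
    # seawater-vapour parcel Gibbs function, properties, and brine fraction.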
funs = seavap4.boilingtemperature
fargs = (0.035,640.)
refs = 274.042416829
fnames = 'boilingtemperature'
argfmt = '({0:5.3f},{1:3g})'
header = 'Seawater-vapour boiling temperature'
test_bt = Tester(funs,fargs,refs,fnames,argfmt,header=header)
funs = seavap4.vapourpressure
fargs = (0.035,274.)
refs = 638.044692615
fnames = 'vapourpressure'
argfmt = '({0:5.3f},{1:3g})'
header = 'Seawater-vapour vapour pressure'
test_vp = Tester(funs,fargs,refs,fnames,argfmt,header=header)
funs = seavap4.brinesalinity
fargs = (274.,640.)
refs = 2.94396298294e-2
fnames = 'brinesalinity'
argfmt = '({0:3g},{1:3g})'
header = 'Seawater-vapour brine salinity'
test_bs = Tester(funs,fargs,refs,fnames,argfmt,header=header)
funs = [seavap4.densitysea,seavap4.densityvap,seavap4.enthalpyevap,
seavap4.enthalpysea,seavap4.enthalpyvap,seavap4.entropysea,
seavap4.entropyvap,seavap4.volumeevap]
fargs = tuple()
fkwargs = {'salt': 0.035, 'pres': 640.}
refs = [1027.87349556,5.06324890264e-3,2498295.32187,3465.122066144071,
2502546.89358,13.061700450797833,9140.56256065,197.500648110]
refs_alt = [None,None,None,3465.11896144,None,13.0616891215,None,None]
fnames = ['densitysea','densityvap','enthalpyevap','enthalpysea',
'enthalpyvap','entropysea','entropyvap','volumeevap']
argfmt = '({0:s}={1:5.3f},{2:s}={3:g})'
header = 'Seawater-vapour at salinity and pressure'
eqfun = seavap4.eq_stp
eqkeys = ['salt','temp','pres','dliq','dvap']
test_sp = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys,
refs_alt=refs_alt)
funs = [seavap4.densitysea,seavap4.densityvap,seavap4.enthalpyevap,
seavap4.enthalpysea,seavap4.enthalpyvap,seavap4.entropysea,
seavap4.entropyvap,seavap4.volumeevap]
fargs = tuple()
fkwargs = {'salt': 0.035, 'temp': 274.}
refs = [1027.87626132,5.04855547811e-3,2498395.40101,3295.96629299,
2502469.07187,12.4443983378,9141.68990452,198.075461154]
fnames = ['densitysea','densityvap','enthalpyevap','enthalpysea',
'enthalpyvap','entropysea','entropyvap','volumeevap']
argfmt = '({0:s}={1:5.3f},{2:s}={3:g})'
header = 'Seawater-vapour at salinity and temperature'
test_st = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys)
funs = [seavap4.densitysea,seavap4.densityvap,seavap4.enthalpyevap,
seavap4.enthalpysea,seavap4.enthalpyvap,seavap4.entropysea,
seavap4.entropyvap,seavap4.volumeevap]
fargs = tuple()
fkwargs = {'temp': 274., 'pres': 640.}
refs = [1023.42713047,5.06403699513e-3,2498551.19875,3405.93353730,
2502466.96633,14.0256815112,9140.27087793,197.469911653]
fnames = ['densitysea','densityvap','enthalpyevap','enthalpysea',
'enthalpyvap','entropysea','entropyvap','volumeevap']
argfmt = '({0:s}={1:3g},{2:s}={3:g})'
header = 'Seawater-vapour at temperature and pressure'
test_tp = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys)
funs = seavap4.seavap_g
args1 = (0.035,274.,610.)
fargs = [(der+args1) for der in _DERS3]
refs = [-2748.82963245,151028.257424,-6072.50817709,137.534028399,0.,
88286.38618253275,-1990.1384855543138,-2760.11106421,63.1093348229,
-1.65027885871]
refs_alt = [None,None,None,None,None,14965.0677011,-321.591932572,None,None,
None]
fnames = 'seavap_g'
argfmt = '({0:5.3f},{1:3g},{2:3g})'
header = 'Seawater-vapour parcel Gibbs function'
eqfun = seavap4.eq_seavap
eqkeys = ['salt','dliq','dvap']
test_der = Tester(funs,fargs,refs,fnames,argfmt,header=header,eqfun=eqfun,
eqargs=args1,eqkeys=eqkeys,refs_alt=refs_alt)
funs = [seavap4.cp,seavap4.density,seavap4.enthalpy,seavap4.entropy,
seavap4.expansion,seavap4.kappa_t]
fargs = (0.035,274.,610.)
refs = [756270.431593,7.27092786882e-3,1661118.41089,6072.50817709,
0.458863421347,1.19990585451e-2]
fnames = ['cp','density','enthalpy','entropy','expansion','kappa_t']
argfmt = '({0:5.3f},{1:3g},{2:3g})'
header = 'Seawater-vapour parcel functions'
test_fun = Tester(funs,fargs,refs,fnames,argfmt,header=header,eqfun=eqfun,
eqargs=fargs,eqkeys=eqkeys)
funs = seavap4.brinefraction
fargs = (0.0035,274.,640.)
refs = 0.118887364425
fnames = 'brinefraction'
argfmt = '({0:6.4f},{1:3g},{2:3g})'
header = 'Seawater-vapour parcel brine fraction'
test_bf = Tester(funs,fargs,refs,fnames,argfmt,header=header,eqfun=eqfun,
eqargs=fargs,eqkeys=eqkeys)
return (test_bt,test_vp,test_bs,test_sp,test_st,test_tp,test_der,test_fun,
test_bf)
def genseaice4():
"""Generate seaice4 Testers.
"""
from teospy import seaice4
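    # Sea-ice equilibrium: brine salinity, melting pressure, freezing temperature,
    # two-phase properties, and the sea-ice parcel Gibbs function and properties.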
funs = seaice4.brinesalinity
fargs = (270.,1e5)
refs = 0.0560264150322
fnames = 'brinesalinity'
argfmt = '({0:3g},{1:6g})'
header = 'Sea-ice brine salinity'
test_bs = Tester(funs,fargs,refs,fnames,argfmt,header=header)
funs = seaice4.meltingpressure
fargs = (0.035,270.)
refs = 16132047.4385
fnames = 'meltingpressure'
argfmt = '({0:5.3f},{1:3g})'
header = 'Sea-ice melting pressure'
test_mp = Tester(funs,fargs,refs,fnames,argfmt,header=header)
funs = [seaice4.freezingtemperature,seaice4.dtfdp,seaice4.dtfds]
fargs = (0.035,1e5)
refs = [271.240373585,7.48210942879e-8,-56.8751336296]
fnames = ['freezingtemperature','dtfdp','dtfds']
argfmt = '({0:5.3f},{1:6g})'
header = 'Sea-ice freezing temperature'
test_ft = Tester(funs,fargs,refs,fnames,argfmt,header=header)
funs = [seaice4.densityice,seaice4.densitysea,seaice4.enthalpymelt,
seaice4.volumemelt,seaice4.enthalpyice,seaice4.enthalpysea,
seaice4.entropyice,seaice4.entropysea]
fargs = tuple()
fkwargs = {'temp': 270., 'pres': 1e5}
refs = [917.181167192,1045.16805918,328249.119579,-9.181869179e-5,
-339929.555499,-12742.8664892,-1244.97335506,-53.1667911144]
fnames = ['densityice','densitysea','enthalpymelt','volumemelt',
'enthalpyice','enthalpysea','entropyice','entropysea']
argfmt = '({0:s}={1:3g},{2:s}={3:6g})'
header = 'Sea-ice at temperature and pressure'
eqfun = seaice4.eq_stp
eqkeys = ['salt','temp','pres','dliq']
test_tp = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys)
fkwargs = {'salt': 0.035, 'temp': 270.}
refs = [918.898527655,1035.73670169,326829.393605,-9.67135426848e-5,
-323205.968289,2832.94910407,-1247.71314646,-46.7361169560]
argfmt = '({0:s}={1:5.3f},{2:s}={3:3g})'
header = 'Sea-ice at salinity and temperature'
test_st = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys)
fkwargs = {'salt': 0.035, 'pres': 1e5}
refs = [917.000739687,1028.05199645,329942.976285,-9.10140854473e-5,
-337351.999358,-7613.19337919,-1235.44872812,-27.9264598103]
argfmt = '({0:s}={1:5.3f},{2:s}={3:6g})'
header = 'Sea-ice at salinity and pressure'
test_sp = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys)
funs = seaice4.seaice_g
args1 = (0.035,270.,1e5)
fargs = [(der+args1) for der in _DERS3]
refs = [-414.017574547,96363.7730495,500.445444181,1.00689072300e-3,0.,
-21272.226025171047,-0.002383040378214491,-232.847783380,
1.19590706917e-7,-1.57591932118e-12]
refs_alt = [None,None,None,None,None,-1144.02883419,-8.62856321467e-4,None,
-1.65866446694e-5,None]
fnames = 'seaice_g'
argfmt = '({0:5.3f},{1:3g},{2:6g})'
header = 'Sea-ice parcel Gibbs function'
eqfun = seaice4.eq_seaice
eqkeys = ['salt','dliq']
test_der = Tester(funs,fargs,refs,fnames,argfmt,header=header,eqfun=eqfun,
eqargs=args1,eqkeys=eqkeys,refs_alt=refs_alt)
funs = [seaice4.brinefraction,seaice4.cp,seaice4.density,seaice4.enthalpy,
seaice4.entropy,seaice4.expansion,seaice4.kappa_t]
fargs = args1
refs = [0.6247053283,62868.9015126,993.156434117,-135534.287503,
-500.445444181,1.18772280035e-4,1.56513441348e-9]
refs_alt = [None,None,None,None,None,-1.64731328738e-2,None]
fnames = ['brinefraction','cp','density','enthalpy','entropy','expansion',
'kappa_t']
header = 'Sea-ice parcel functions'
test_fun = Tester(funs,fargs,refs,fnames,argfmt,header=header,eqfun=eqfun,
eqargs=fargs,eqkeys=eqkeys,refs_alt=refs_alt)
return (test_bs,test_mp,test_ft,test_tp,test_st,test_sp,test_der,test_fun)
def genseaicevap4():
"""Generate seaicevap4 Testers.
"""
from teospy import seaicevap4
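    # Seawater-ice-vapour triple equilibrium at fixed salinity, temperature, or pressure.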
funs = [seaicevap4.densityvap,seaicevap4.temperature,seaicevap4.pressure]
fargs = tuple()
fkwargs = {'salt': 0.035}
refs = [4.17156419318e-3,271.247815057,521.950349225]
fnames = ['densityvap','temperature','pressure']
argfmt = '({0:s}={1:5.3f})'
header = 'Sea-ice-vap at salinity'
eqfun = seaicevap4.eq_stp
eqkeys = ['salt','temp','pres','dliq','dvap']
test_s = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys)
funs = [seaicevap4.densityvap,seaicevap4.salinity,seaicevap4.pressure]
fkwargs = {'temp': 270.}
refs = [3.77406140772e-3,5.61489288506e-2,470.059067981]
fnames = ['densityvap','salinity','pressure']
argfmt = '({0:s}={1:3g})'
header = 'Sea-ice-vap at temperature'
test_t = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys)
funs = [seaicevap4.densityvap,seaicevap4.salinity,seaicevap4.temperature]
fkwargs = {'pres': 500.}
refs = [4.00364833230e-3,4.38955766482e-2,270.734430917]
refs_alt = [None,4.38955878828e-2,None]
fnames = ['densityvap','salinity','temperature']
argfmt = '({0:s}={1:3g})'
header = 'Sea-ice-vap at pressure'
test_p = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys,
refs_alt=refs_alt)
return (test_s, test_t, test_p)
def genseaair4():
"""Generate seaair4 Testers.
"""
from teospy import seaair4
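    # Sea-air equilibrium: dry-air mass fraction, vapour pressure, condensation
    # temperature, properties at (salt,temp,pres) and (salt,airf,pres), and the
    # evaporation chemical-potential difference.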
funs = [seaair4.massfractionair,seaair4.vapourpressure]
fargs = (0.035,300.,1e5)
refs = [0.978029483888,3485.92986681]
fnames = ['massfractionair','vapourpressure']
argfmt = '({0:5.3f},{1:3g},{2:6g})'
header = 'Sea-air dry fraction and vap pressure'
test_ma = Tester(funs,fargs,refs,fnames,argfmt,header=header)
funs = seaair4.condensetemp
fargs = (0.035,0.99,1e5)
refs = 287.367451766
refs_alt = 287.367456468
fnames = 'condensetemp'
argfmt = '({0:5.3f},{1:4.2f},{2:6g})'
header = 'Sea-air condense temp'
test_ct = Tester(funs,fargs,refs,fnames,argfmt,header=header,
refs_alt=refs_alt)
funs = [seaair4.entropyair,seaair4.enthalpyevap,seaair4.densityair,
seaair4.densityvap]
fargs = tuple()
fkwargs = {'salt': 0.035, 'temp': 300., 'pres': 1e5}
refs = [293.150672957,2434570.55229,1.14642944448,2.51876465812e-2]
refs_alt = [None,2434549.89770,None,None]
fnames = ['entropyair','enthalpyevap','densityair','densityvap']
argfmt = '({0:s}={1:5.3f},{2:s}={3:3g},{4:s}={5:6g})'
header = 'Sea-air at salt, temp, and pres'
eqfun = seaair4.eq_satp
eqkeys = ['salt','airf','temp','pres','dhum','dliq']
test_stp = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys,
refs_alt=refs_alt)
funs = [seaair4.enthalpyevap,seaair4.densityair,seaair4.densityvap]
fkwargs = {'salt': 0.035, 'airf': 0.99, 'pres': 1e5}
refs = [2464765.77588,1.20553990576,1.20553990576e-2]
refs_alt = [2464738.79136,1.20553988598,1.20553988598e-2]
fnames = ['enthalpyevap','densityair','densityvap']
argfmt = '({0:s}={1:5.3f},{2:s}={3:3g},{4:s}={5:6g})'
header = 'Sea-air at salt, airf, and pres'
test_sap = Tester(funs,fargs,refs,fnames,argfmt,header=header,
fkwargs=fkwargs,eqfun=eqfun,eqargs=fargs,eqkwargs=fkwargs,eqkeys=eqkeys,
refs_alt=refs_alt)
funs = seaair4.chempotevap
fargs = (0.9,0.035,300.,1e5)
refs = 1.45584069071
fnames = 'chempotevap'
argfmt = '({0:3.1f},{1:5.3f},{2:3g},{3:6g})'
header = 'Sea-air potential difference'
test_pd = Tester(funs,fargs,refs,fnames,argfmt,header=header)
return (test_ma,test_ct,test_stp,test_sap,test_pd)
def geniceflu5():
"""Generate iceflu5 Testers.
"""
from teospy import iceflu5
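    # Approximate correlations for the melting pressure, melting temperature, and
    # vapour pressure of ice.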
funs = iceflu5.liqpressure
fargs = (272.4,)
refs = 1.00213724736e7
fnames = 'liqpressure'
argfmt = '({0:5.1f})'
header = 'Approximate melting pressure of ice'
test_lp = Tester(funs,fargs,refs,fnames,argfmt,header=header)
funs = iceflu5.liqtemperature
fargs = (1e7,)
refs = 272.401569225
fnames = 'liqtemperature'
argfmt = '({0:4g})'
header = 'Approximate melting temperature of ice'
test_lt = Tester(funs,fargs,refs,fnames,argfmt,header=header)
funs = iceflu5.vappressure
fargs = (270.,)
refs = 470.061877574
fnames = 'vappressure'
argfmt = '({0:3g})'
header = 'Approximate vapour pressure over ice'
test_vp = Tester(funs,fargs,refs,fnames,argfmt,header=header)
return (test_lp, test_lt, test_vp)
## Dictionary relating modules to functions
_GENDICT = {'liqvap4': genliqvap4, 'iceliq4': geniceliq4, 'icevap4': genicevap4,
'iceair4a': geniceair4a, 'iceair4b': geniceair4b, 'iceair4c': geniceair4c,
'liqair4a': genliqair4a, 'liqair4b': genliqair4b, 'liqair4c': genliqair4c,
'liqiceair4': genliqiceair4, 'sealiq4': gensealiq4, 'seavap4': genseavap4,
'seaice4': genseaice4, 'seaicevap4': genseaicevap4, 'seaair4': genseaair4,
'iceflu5': geniceflu5}
## See if all values fall within the given tolerances
if __name__ == "__main__":
    # Figure out which modules' tests to include
import sys
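    # With no arguments, every module's tests are generated; otherwise only modules
    # whose names contain one of the arguments (e.g. 'iceair' selects iceair4a/4b/4c).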
if len(sys.argv) == 1:
testlist = list()
for (modname,genfun) in _GENDICT.items():
testlist += list(genfun())
else:
modlist = list()
testlist = list()
for arg in sys.argv[1:]:
for (modname,genfun) in _GENDICT.items():
if arg in modname and modname not in modlist:
modlist.append(modname)
testlist += list(genfun())
# Run tests
with warnings.catch_warnings():
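        # Suppress the 'step sizes' RuntimeWarnings raised while the checks run so the
        # printed results stay readable.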
warnstart = 'Step sizes are smaller than accepted tolerance.'
warnings.filterwarnings('ignore',warnstart,RuntimeWarning)
for test in testlist:
test.run()
test.printresults()
| [
"teospy.tests.tester.Tester",
"warnings.catch_warnings",
"warnings.filterwarnings"
] | [((2938, 3077), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkwargs': 'eqkwargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=eqargs, eqkwargs=eqkwargs, eqkeys=eqkeys)\n', (2944, 3077), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((3958, 4097), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkwargs': 'eqkwargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=eqargs, eqkwargs=eqkwargs, eqkeys=eqkeys)\n', (3964, 4097), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((5080, 5219), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkwargs': 'eqkwargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=eqargs, eqkwargs=eqkwargs, eqkeys=eqkeys)\n', (5086, 5219), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((6093, 6232), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkwargs': 'eqkwargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=eqargs, eqkwargs=eqkwargs, eqkeys=eqkeys)\n', (6099, 6232), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((7213, 7352), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkwargs': 'eqkwargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=eqargs, eqkwargs=eqkwargs, eqkeys=eqkeys)\n', (7219, 7352), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((8226, 8365), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkwargs': 'eqkwargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=eqargs, eqkwargs=eqkwargs, eqkeys=eqkeys)\n', (8232, 8365), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((9056, 9219), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkwargs': 'eqkwargs', 'eqkeys': 'eqkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=eqargs, eqkwargs=eqkwargs, eqkeys=eqkeys, refs_alt=\n refs_alt)\n', (9062, 9219), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((9737, 9900), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkwargs': 'eqkwargs', 'eqkeys': 'eqkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=eqargs, eqkwargs=eqkwargs, eqkeys=eqkeys, refs_alt=\n refs_alt)\n', 
(9743, 9900), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((10441, 10604), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkwargs': 'eqkwargs', 'eqkeys': 'eqkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=eqargs, eqkwargs=eqkwargs, eqkeys=eqkeys, refs_alt=\n refs_alt)\n', (10447, 10604), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((11186, 11349), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkwargs': 'eqkwargs', 'eqkeys': 'eqkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=eqargs, eqkwargs=eqkwargs, eqkeys=eqkeys, refs_alt=\n refs_alt)\n', (11192, 11349), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((11564, 11620), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (11570, 11620), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((11801, 11857), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (11807, 11857), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((12047, 12103), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (12053, 12103), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((12308, 12364), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (12314, 12364), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((12676, 12779), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, eqfun=eqfun,\n eqargs=eqargs, eqkeys=eqkeys)\n', (12682, 12779), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((13047, 13103), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (13053, 13103), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((13367, 13423), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (13373, 13423), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((14349, 14513), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkwargs': 'eqkwargs', 'eqkeys': 'eqkeys', 'keepkeys': 'keepkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, eqfun=eqfun,\n eqargs=eqargs, eqkwargs=eqkwargs, eqkeys=eqkeys, keepkeys=keepkeys,\n refs_alt=refs_alt)\n', (14355, 14513), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((15242, 15406), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 
'refs', 'fnames', 'argfmt'], {'header': 'header', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkwargs': 'eqkwargs', 'eqkeys': 'eqkeys', 'keepkeys': 'keepkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, eqfun=eqfun,\n eqargs=eqargs, eqkwargs=eqkwargs, eqkeys=eqkeys, keepkeys=keepkeys,\n refs_alt=refs_alt)\n', (15248, 15406), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((16246, 16409), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkwargs': 'eqkwargs', 'eqkeys': 'eqkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=eqargs, eqkwargs=eqkwargs, eqkeys=eqkeys, refs_alt=\n refs_alt)\n', (16252, 16409), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((16982, 17145), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkwargs': 'eqkwargs', 'eqkeys': 'eqkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=eqargs, eqkwargs=eqkwargs, eqkeys=eqkeys, refs_alt=\n refs_alt)\n', (16988, 17145), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((17560, 17663), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, eqfun=eqfun,\n eqargs=eqargs, eqkeys=eqkeys)\n', (17566, 17663), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((18423, 18584), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys, refs_alt=\n refs_alt)\n', (18429, 18584), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((19135, 19296), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys, refs_alt=\n refs_alt)\n', (19141, 19296), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((19844, 20005), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys, refs_alt=\n refs_alt)\n', (19850, 20005), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((20560, 20721), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n 
eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys, refs_alt=\n refs_alt)\n', (20566, 20721), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((20940, 20996), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (20946, 20996), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((21194, 21250), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (21200, 21250), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((21430, 21486), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (21436, 21486), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((21785, 21887), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, eqfun=eqfun,\n eqargs=fargs, eqkeys=eqkeys)\n', (21791, 21887), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((22160, 22216), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (22166, 22216), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((22486, 22542), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (22492, 22542), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((23442, 23606), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkwargs': 'eqkwargs', 'eqkeys': 'eqkeys', 'keepkeys': 'keepkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, eqfun=eqfun,\n eqargs=eqargs, eqkwargs=eqkwargs, eqkeys=eqkeys, keepkeys=keepkeys,\n refs_alt=refs_alt)\n', (23448, 23606), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((24333, 24497), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkwargs': 'eqkwargs', 'eqkeys': 'eqkeys', 'keepkeys': 'keepkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, eqfun=eqfun,\n eqargs=eqargs, eqkwargs=eqkwargs, eqkeys=eqkeys, keepkeys=keepkeys,\n refs_alt=refs_alt)\n', (24339, 24497), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((25332, 25495), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkwargs': 'eqkwargs', 'eqkeys': 'eqkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=eqargs, eqkwargs=eqkwargs, eqkeys=eqkeys, refs_alt=\n refs_alt)\n', (25338, 25495), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((25965, 26128), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkwargs': 'eqkwargs', 'eqkeys': 'eqkeys', 'refs_alt': 
'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=eqargs, eqkwargs=eqkwargs, eqkeys=eqkeys, refs_alt=\n refs_alt)\n', (25971, 26128), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((26557, 26660), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'eqfun': 'eqfun', 'eqargs': 'eqargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, eqfun=eqfun,\n eqargs=eqargs, eqkeys=eqkeys)\n', (26563, 26660), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((27138, 27275), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys)\n', (27144, 27275), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((27526, 27663), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys)\n', (27532, 27663), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((27914, 28051), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys)\n', (27920, 28051), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((28600, 28737), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys)\n', (28606, 28737), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((29480, 29617), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys)\n', (29486, 29617), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((29844, 29900), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (29850, 29900), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((30273, 30329), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (30279, 30329), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((30655, 30711), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (30661, 30711), False, 'from 
teospy.tests.tester import Tester, _DERS3\n'), ((30920, 30976), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (30926, 30976), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((31182, 31238), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (31188, 31238), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((32034, 32195), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys, refs_alt=\n refs_alt)\n', (32040, 32195), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((32836, 32973), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys)\n', (32842, 32973), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((33608, 33745), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys)\n', (33614, 33745), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((34315, 34436), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'eqfun': 'eqfun', 'eqargs': 'args1', 'eqkeys': 'eqkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, eqfun=eqfun,\n eqargs=args1, eqkeys=eqkeys, refs_alt=refs_alt)\n', (34321, 34436), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((34873, 34975), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, eqfun=eqfun,\n eqargs=fargs, eqkeys=eqkeys)\n', (34879, 34975), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((35204, 35306), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, eqfun=eqfun,\n eqargs=fargs, eqkeys=eqkeys)\n', (35210, 35306), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((35686, 35742), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (35692, 35742), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((35946, 36002), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (35952, 36002), False, 'from teospy.tests.tester 
import Tester, _DERS3\n'), ((36299, 36355), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (36305, 36355), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((37055, 37192), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys)\n', (37061, 37192), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((37488, 37625), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys)\n', (37494, 37625), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((37918, 38055), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys)\n', (37924, 38055), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((38639, 38760), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'eqfun': 'eqfun', 'eqargs': 'args1', 'eqkeys': 'eqkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, eqfun=eqfun,\n eqargs=args1, eqkeys=eqkeys, refs_alt=refs_alt)\n', (38645, 38760), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((39263, 39384), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkeys': 'eqkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, eqfun=eqfun,\n eqargs=fargs, eqkeys=eqkeys, refs_alt=refs_alt)\n', (39269, 39384), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((39963, 40100), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys)\n', (39969, 40100), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((40401, 40538), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys)\n', (40407, 40538), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((40886, 41047), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys', 'refs_alt': 'refs_alt'}), 
'(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys, refs_alt=\n refs_alt)\n', (40892, 41047), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((41461, 41517), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (41467, 41517), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((41754, 41829), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, refs_alt=refs_alt)\n', (41760, 41829), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((42407, 42568), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys, refs_alt=\n refs_alt)\n', (42413, 42568), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((42997, 43158), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header', 'fkwargs': 'fkwargs', 'eqfun': 'eqfun', 'eqargs': 'fargs', 'eqkwargs': 'fkwargs', 'eqkeys': 'eqkeys', 'refs_alt': 'refs_alt'}), '(funs, fargs, refs, fnames, argfmt, header=header, fkwargs=fkwargs,\n eqfun=eqfun, eqargs=fargs, eqkwargs=fkwargs, eqkeys=eqkeys, refs_alt=\n refs_alt)\n', (43003, 43158), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((43381, 43437), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (43387, 43437), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((43776, 43832), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (43782, 43832), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((44033, 44089), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (44039, 44089), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((44283, 44339), 'teospy.tests.tester.Tester', 'Tester', (['funs', 'fargs', 'refs', 'fnames', 'argfmt'], {'header': 'header'}), '(funs, fargs, refs, fnames, argfmt, header=header)\n', (44289, 44339), False, 'from teospy.tests.tester import Tester, _DERS3\n'), ((45462, 45487), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (45485, 45487), False, 'import warnings\n'), ((45567, 45627), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""', 'warnstart', 'RuntimeWarning'], {}), "('ignore', warnstart, RuntimeWarning)\n", (45590, 45627), False, 'import warnings\n')] |
import keras
import keras.backend as K
from keras import layers
from keras.layers import *
from keras.models import *
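# ResNet bottleneck identity block: 1x1 -> kxk -> 1x1 convolutions whose output is
# added back onto the input tensor, so spatial size and channel count are unchanged.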
def identity_block(input_tensor, kernel_size, filters, stage, block):
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
    x = Conv2D(filters2, kernel_size, padding='same', name=conv_name_base + '2b')(x)
x = BatchNormalization(name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
x = BatchNormalization(name=bn_name_base + '2c')(x)
x = layers.add([x, input_tensor])
x = Activation('relu')(x)
return x
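# ResNet bottleneck block with a projection shortcut: the strided 1x1 convolution on
# the shortcut path lets the block change both spatial size and channel count.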
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), strides=strides,
name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size, padding='same',
name=conv_name_base + '2b')(x)
x = BatchNormalization(name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
x = BatchNormalization(name=bn_name_base + '2c')(x)
shortcut = Conv2D(filters3, (1, 1), strides=strides,
name=conv_name_base + '1')(input_tensor)
shortcut = BatchNormalization(name=bn_name_base + '1')(shortcut)
x = layers.add([x, shortcut])
x = Activation('relu')(x)
return x
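# Builds the ResNet50 backbone and returns the input tensor together with the
# feature maps f1..f5, taken at strides 2, 4, 8, 16 and 32 respectively.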
def get_resnet50_encoder(input_height=224, input_width=224):
img_input = Input([input_height, input_width, 3])
# 416,416,3 -> 208,208,64
x = ZeroPadding2D((3, 3))(img_input)
x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
x = BatchNormalization(name='bn_conv1')(x)
x = Activation('relu')(x)
f1 = x
# 208,208,64 -> 104,104,64 -> 104,104,256
x = MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
    # f2 is the feature map after the h/w dimensions have been downsampled twice
f2 = x
# 104,104,256 -> 52,52,512
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
    # f3 is the feature map after the h/w dimensions have been downsampled three times
f3 = x
# 52,52,512 -> 26,26,1024
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
    # f4 is the feature map after the h/w dimensions have been downsampled four times
f4 = x
# 26,26,1024 -> 13,13,2048
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    # f5 is the feature map after the h/w dimensions have been downsampled five times
f5 = x
    return img_input, [f1, f2, f3, f4, f5]
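if __name__ == "__main__":
    # Illustrative usage (not part of the original module): wrap the encoder outputs
    # in a Model to inspect the feature-map shapes.
    img_input, feats = get_resnet50_encoder(416, 416)
    model = Model(img_input, feats[-1])
    model.summary()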
| [
"keras.layers.add"
] | [((838, 867), 'keras.layers.add', 'layers.add', (['[x, input_tensor]'], {}), '([x, input_tensor])\n', (848, 867), False, 'from keras import layers\n'), ((1887, 1912), 'keras.layers.add', 'layers.add', (['[x, shortcut]'], {}), '([x, shortcut])\n', (1897, 1912), False, 'from keras import layers\n')] |
#! /usr/bin/env python3
import re
import sys
import time
from collections import defaultdict
from fractions import Fraction
from typing import Generator, Iterable, List, Match, Optional, Tuple
test = True
debug = False
stdin = False
INFILENAME = "inputs/05.txt"
TESTFILENAME = "inputs/05.test.txt"
for arg in sys.argv:
if arg == "--notest":
test = False
if arg == "--debug":
debug = True
if arg == "--stdin":
stdin = True
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKCYAN = "\033[96m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
# Modified range functions
def irange(start, end=None, step=1) -> Generator[int, None, None]:
"""Inclusive range function."""
if end is None:
start, end = 0, start
yield from range(start, end + 1, step=step)
def dirange(start, end=None, step=1) -> Generator[int, None, None]:
"""
Directional, inclusive range. This range function is an inclusive version of
:class:`range` that figures out the correct step direction to make sure that it goes
from `start` to `end`, even if `end` is before `start`.
>>> dirange(2, -2)
[2, 1, 0, -1, -2]
>>> dirange(-2)
[0, -1, -2]
>>> dirange(2)
[0, 1, 2]
"""
assert step > 0
if end is None:
start, end = 0, start
if end >= start:
yield from irange(start, end, step)
else:
yield from range(start, end - 1, step=-step)
# Utilities
def int_points_between(
start: Tuple[int, int], end: Tuple[int, int]
) -> Generator[Tuple[int, int], None, None]:
"""
Return a generator of all of the integer points between two given points. Note that
you are *not* guaranteed that the points will be given from `start` to `end`, but
all points will be included.
"""
x1, y1 = start
x2, y2 = end
if x1 == x2:
yield from ((x1, y) for y in dirange(y1, y2))
elif y1 == y2:
yield from ((x, y1) for x in dirange(x1, x2))
else:
# If `x1 > x2`, that means that `start` is to the right of `end`, so we need to
# switch the points around so iteration always goes in the positive `x`
# direction.
if x1 > x2:
x1, x2, y1, y2 = x2, x1, y2, y1
dy = y2 - y1
dx = x2 - x1
slope = Fraction(dy, dx)
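        # The reduced fraction dy/dx is the smallest integer step along the segment,
        # so dy // slope.numerator equals gcd(|dy|, dx), the number of steps to take.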
for i in irange(dy // slope.numerator):
yield (x1 + (i * slope.denominator), y1 + (i * slope.numerator))
def rematch(pattern: str, s: str) -> Optional[Match]:
return re.fullmatch(pattern, s)
print(f"\n{'=' * 30}\n")
# Read the input
if stdin:
input_lines: List[str] = [l.strip() for l in sys.stdin.readlines()]
else:
with open(INFILENAME) as f:
input_lines: List[str] = [l.strip() for l in f.readlines()]
# Try and read in the test file.
try:
with open(TESTFILENAME) as f:
test_lines: List[str] = [l.strip() for l in f.readlines()]
except Exception:
test_lines = []
# Shared
########################################################################################
def parselines(lines: List[str]) -> Iterable[Tuple[Tuple[int, int], Tuple[int, int]]]:
for line in lines:
x1, y1, x2, y2 = map(int, rematch(r"(\d+),(\d+) -> (\d+),(\d+)", line).groups())
yield (x1, y1), (x2, y2)
# Part 1
########################################################################################
print("Part 1:")
def part1(lines: List[str]) -> int:
"""
For part 1, you only have to consider horizontal and vertical lines. That is, lines
where either x1 = x2 or y1 = y2.
"""
G = defaultdict(int)
for (x1, y1), (x2, y2) in parselines(lines):
if x1 != x2 and y1 != y2:
            # Part 1 only considers horizontal and vertical lines (x1 == x2 or
            # y1 == y2), so diagonal segments are skipped entirely.
continue
for x, y in int_points_between((x1, y1), (x2, y2)):
G[(x, y)] += 1
return sum([1 for x in G.values() if x > 1])
# Run test on part 1
if test:
print("Running test... ", end="")
if not test_lines:
print(f"{bcolors.FAIL}No test configured!{bcolors.ENDC}")
else:
test_ans_part1 = part1(test_lines)
expected = 5
if expected is None:
print(f"{bcolors.FAIL}No test configured!{bcolors.ENDC}")
elif test_ans_part1 == expected:
print(f"{bcolors.OKGREEN}PASS{bcolors.ENDC}")
else:
print(f"{bcolors.FAIL}FAIL{bcolors.ENDC}")
assert False
print("Result:", test_ans_part1)
print()
part1_start = time.time()
print("Running input...")
ans_part1 = part1(input_lines)
part1_end = time.time()
print("Result:", ans_part1)
tries = [
# Store the attempts that failed here.
]
if tries:
print("Tries Part 1:", tries)
assert ans_part1 not in tries, "Same as an incorrect answer!"
# Regression Test
expected = 4826
if expected is not None:
assert ans_part1 == expected
# Part 2
########################################################################################
print("\nPart 2:")
def part2(lines: List[str]) -> int:
G = defaultdict(int)
for p1, p2 in parselines(lines):
for x, y in int_points_between(p1, p2):
G[(x, y)] += 1
return sum([1 for x in G.values() if x > 1])
# Run test on part 2
if test:
print("Running test... ", end="")
if not test_lines:
print(f"{bcolors.FAIL}No test configured!{bcolors.ENDC}")
else:
test_ans_part2 = part2(test_lines)
expected = 12
if expected is None:
print(f"{bcolors.FAIL}No test configured!{bcolors.ENDC}")
elif test_ans_part2 == expected:
print(f"{bcolors.OKGREEN}PASS{bcolors.ENDC}")
else:
print(f"{bcolors.FAIL}FAIL{bcolors.ENDC}")
print(f"{bcolors.FAIL}Result: {test_ans_part2}{bcolors.ENDC}")
assert False
print("Result:", test_ans_part2)
print()
part2_start = time.time()
print("Running input...")
ans_part2 = part2(input_lines)
part2_end = time.time()
print("Result:", ans_part2)
tries2 = [
12730,
# Store the attempts that failed here.
]
if tries2:
print("Tries Part 2:", tries2)
assert ans_part2 not in tries2, "Same as an incorrect answer!"
# Regression Test
expected = 16793
if expected is not None:
assert ans_part2 == expected
if debug:
part1_time = part1_end - part1_start
part2_time = part2_end - part2_start
print()
print("DEBUG:")
print(f"Part 1: {part1_time * 1000}ms")
print(f"Part 2: {part2_time * 1000}ms")
print(f"TOTAL: {(part1_time + part2_time) * 1000}ms")
| [
"fractions.Fraction",
"sys.stdin.readlines",
"re.fullmatch",
"collections.defaultdict",
"time.time"
] | [((4787, 4798), 'time.time', 'time.time', ([], {}), '()\n', (4796, 4798), False, 'import time\n'), ((4868, 4879), 'time.time', 'time.time', ([], {}), '()\n', (4877, 4879), False, 'import time\n'), ((6185, 6196), 'time.time', 'time.time', ([], {}), '()\n', (6194, 6196), False, 'import time\n'), ((6266, 6277), 'time.time', 'time.time', ([], {}), '()\n', (6275, 6277), False, 'import time\n'), ((2614, 2638), 're.fullmatch', 're.fullmatch', (['pattern', 's'], {}), '(pattern, s)\n', (2626, 2638), False, 'import re\n'), ((3688, 3704), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (3699, 3704), False, 'from collections import defaultdict\n'), ((5332, 5348), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (5343, 5348), False, 'from collections import defaultdict\n'), ((2405, 2421), 'fractions.Fraction', 'Fraction', (['dy', 'dx'], {}), '(dy, dx)\n', (2413, 2421), False, 'from fractions import Fraction\n'), ((2743, 2764), 'sys.stdin.readlines', 'sys.stdin.readlines', ([], {}), '()\n', (2762, 2764), False, 'import sys\n')] |
import os
import unittest
import test_data.test_static
from jivago.wsgi.routing.serving.static_file_routing_table import StaticFileRoutingTable
FILENAME = "foobar.html"
class StaticFileRoutingTableTest(unittest.TestCase):
def setUp(self):
self.routing_table = StaticFileRoutingTable(os.path.dirname(test_data.test_static.__file__))
def test_whenGettingRouteRegistrations_thenReturnAnHttpGetRouteWhenItMatchesAFile(self):
registrations = self.routing_table.get_route_registrations(FILENAME)
self.assertEqual(1, len(registrations))
def test_givenInexistentFile_whenGettingRouteRegistrations_thenReturnEmptyList(self):
routes = self.routing_table.get_route_registrations("inexistent.html")
self.assertEqual(0, len(routes))
def test_givenDisallowedFileExtension_whenGettingRouteRegistrations_thenReturnEmptyList(self):
self.routing_table = StaticFileRoutingTable(os.path.dirname(test_data.test_static.__file__),
allowed_extensions=[".txt"])
routes = self.routing_table.get_route_registrations(FILENAME)
self.assertEqual(0, len(routes))
def test_givenDirectory_whenGettingRouteRegistrations_thenReturnEmptyList(self):
routes = self.routing_table.get_route_registrations("")
self.assertEqual(0, len(routes))
| [
"os.path.dirname"
] | [((300, 347), 'os.path.dirname', 'os.path.dirname', (['test_data.test_static.__file__'], {}), '(test_data.test_static.__file__)\n', (315, 347), False, 'import os\n'), ((933, 980), 'os.path.dirname', 'os.path.dirname', (['test_data.test_static.__file__'], {}), '(test_data.test_static.__file__)\n', (948, 980), False, 'import os\n')] |
import datetime
import email
import os
from dateutil.tz import tzlocal
from django.test import SimpleTestCase
from corehq.util.bounced_email_manager import BouncedEmailManager
from corehq.util.models import AwsMeta
from corehq.util.test_utils import TestFileMixin
class TestBouncedEmailManager(SimpleTestCase, TestFileMixin):
file_path = ('data', 'email')
root = os.path.dirname(__file__)
def setUp(self):
self.manager = BouncedEmailManager()
def _get_message(self, filename):
bounce_file = self.get_path(filename, 'txt')
with open(bounce_file, "r") as f:
bounce_email = f.read()
message = email.message_from_string(bounce_email)
return message
def test_recipients_standard_aws_bounce(self):
aws_bounce = self._get_message('standard_aws_bounce')
self.assertEqual(
self.manager._get_raw_bounce_recipients(aws_bounce),
[
'<EMAIL>',
'<EMAIL>'
]
)
def test_recipients_email_delivery_failure(self):
delivery_failure = self._get_message('email_delivery_failure')
self.assertEqual(
self.manager._get_raw_bounce_recipients(delivery_failure),
['<EMAIL>']
)
def test_recipients_yahoo_qmail(self):
yahoo_qmail = self._get_message('yahoo_qmail_failure')
self.assertEqual(
self.manager._get_raw_bounce_recipients(yahoo_qmail),
['<EMAIL>']
)
def test_recipients_forwarded_bounce(self):
forwarded_bounce = self._get_message('forwarded_bounce')
self.assertEqual(
self.manager._get_raw_bounce_recipients(forwarded_bounce),
['<EMAIL>']
)
def test_recipients_exchange_bounce(self):
exchange_bounce = self._get_message('exchange_bounce')
self.assertEqual(
self.manager._get_raw_bounce_recipients(exchange_bounce),
['<EMAIL>']
)
def test_recipients_auto_reply(self):
out_of_office = self._get_message('out_of_office')
self.assertEqual(
self.manager._get_raw_bounce_recipients(out_of_office),
None
)
def test_sns_bounce_suppressed(self):
sns_bounce_suppressed = self._get_message('sns_bounce_suppressed')
self.assertEqual(
self.manager._get_aws_info(sns_bounce_suppressed, 333),
[
AwsMeta(
notification_type='Bounce',
main_type='Permanent',
sub_type='Suppressed',
email='<EMAIL>',
reason='Amazon SES has suppressed sending to this address '
'because it has a recent history of bouncing as an '
'invalid address. For more information about how '
'to remove an address from the suppression list, '
'see the Amazon SES Developer Guide: '
'http://docs.aws.amazon.com/ses/latest/'
'DeveloperGuide/remove-from-suppressionlist.html ',
headers={
'from': ['<EMAIL>'],
'date': 'Tue, 28 Jan 2020 10:40:04 -0000',
'to': ['<EMAIL>'],
'messageId': '<redacted>',
'subject': 'Late'
},
timestamp=datetime.datetime(
2020, 1, 28, 10, 40, 4, 931000,
tzinfo=tzlocal()
),
destination=['<EMAIL>']
)
]
)
def test_sns_bounce_general(self):
sns_bounce_general = self._get_message('sns_bounce_general')
self.assertEqual(
self.manager._get_aws_info(sns_bounce_general, 333),
[
AwsMeta(
notification_type='Bounce',
main_type='Permanent',
sub_type='General',
email='<EMAIL>',
reason="smtp; 550-5.1.1 The email account that you tried "
"to reach does not exist. Please try\n550-5.1.1 "
"double-checking the recipient's email address for "
"typos or\n550-5.1.1 unnecessary spaces. Learn more"
" at\n550 5.1.1 https://support.google.com/mail/?p="
"NoSuchUser h6si12061056qtp.98 - gsmtp",
headers={
'returnPath': '<EMAIL>',
'from': ['<EMAIL>'],
'date': 'Tue, 28 Jan 2020 09:29:02 -0000',
'to': ['<EMAIL>'],
'messageId': '<redacted>',
'subject': 'Activate your CommCare project'
},
timestamp=datetime.datetime(
2020, 1, 28, 9, 29, 3, 30000,
tzinfo=tzlocal()
),
destination=['<EMAIL>']
)
]
)
def test_sns_bounce_transient(self):
sns_bounce_transient = self._get_message('sns_bounce_transient')
self.assertEqual(
self.manager._get_aws_info(sns_bounce_transient, 333),
[
AwsMeta(
notification_type='Bounce',
main_type='Transient',
sub_type='General',
email='<EMAIL>',
reason=None,
headers={
'returnPath': '<EMAIL>',
'from': ['<EMAIL>'],
'date': 'Tue, 28 Jan 2020 13:00:27 -0000',
'to': ['<EMAIL>'],
'messageId': '<redacted>',
'subject': 'Scheduled report from CommCare HQ'
},
timestamp=datetime.datetime(
2020, 1, 28, 13, 0, 35,
tzinfo=tzlocal()
),
destination=['<EMAIL>']
)
]
)
def test_sns_bounce_complaint(self):
sns_complaint = self._get_message('sns_complaint')
self.assertEqual(
self.manager._get_aws_info(sns_complaint, 333),
[
AwsMeta(
notification_type='Complaint',
main_type=None, sub_type='',
email='<EMAIL>',
reason=None,
headers={},
timestamp=datetime.datetime(
2020, 1, 8, 8, 6, 45,
tzinfo=tzlocal()
),
destination=['<EMAIL>']
)
]
)
| [
"corehq.util.bounced_email_manager.BouncedEmailManager",
"os.path.dirname",
"email.message_from_string",
"dateutil.tz.tzlocal"
] | [((375, 400), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (390, 400), False, 'import os\n'), ((446, 467), 'corehq.util.bounced_email_manager.BouncedEmailManager', 'BouncedEmailManager', ([], {}), '()\n', (465, 467), False, 'from corehq.util.bounced_email_manager import BouncedEmailManager\n'), ((660, 699), 'email.message_from_string', 'email.message_from_string', (['bounce_email'], {}), '(bounce_email)\n', (685, 699), False, 'import email\n'), ((3606, 3615), 'dateutil.tz.tzlocal', 'tzlocal', ([], {}), '()\n', (3613, 3615), False, 'from dateutil.tz import tzlocal\n'), ((5107, 5116), 'dateutil.tz.tzlocal', 'tzlocal', ([], {}), '()\n', (5114, 5116), False, 'from dateutil.tz import tzlocal\n'), ((6181, 6190), 'dateutil.tz.tzlocal', 'tzlocal', ([], {}), '()\n', (6188, 6190), False, 'from dateutil.tz import tzlocal\n'), ((6854, 6863), 'dateutil.tz.tzlocal', 'tzlocal', ([], {}), '()\n', (6861, 6863), False, 'from dateutil.tz import tzlocal\n')] |
import numpy as np
def sqrt_gain_db(gain_db):
return gain_db / 2
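# Second-order (biquad) low-shelving EQ design; returns the coefficient array
# [b0, b1, b2, a0, a1, a2] for a boost/cut of g_db (dB) below corner frequency f (Hz)
# at sample rate fs.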
def design_low_shelving_filter(g_db, f, q, fs):
    k = np.tan(np.pi * f / fs)
    v0 = np.power(10.0, g_db / 20.0)
root2 = 1.0 / q
if v0 < 1:
v0 = 1 / v0
if g_db > 0:
b0 = (1 + np.sqrt(v0) * root2 * k + v0 * k * k) / (1 + root2 * k + k * k)
b1 = (2 * (v0 * k * k - 1)) / (1 + root2 * k + k * k)
b2 = (1 - np.sqrt(v0) * root2 * k + v0 * k * k) / (1 + root2 * k + k * k)
a0 = 1
a1 = (2 * (k * k - 1)) / (1 + root2 * k + k * k)
a2 = (1 - root2 * k + k * k) / (1 + root2 * k + k * k)
elif g_db < 0:
b0 = (1 + root2 * k + k * k) / (1 + root2 * np.sqrt(v0) * k + v0 * k * k)
b1 = (2 * (k * k - 1)) / (1 + root2 * np.sqrt(v0) * k + v0 * k * k)
b2 = (1 - root2 * k + k * k) / (1 + root2 * np.sqrt(v0) * k + v0 * k * k)
a0 = 1
a1 = (2 * (v0 * k * k - 1)) / (1 + root2 * np.sqrt(v0) * k + v0 * k * k)
a2 = (1 - root2 * np.sqrt(v0) * k + v0 * k * k) / (1 + root2 * np.sqrt(v0) * k + v0 * k * k)
else:
b0 = 1
b1 = 0
b2 = 0
a0 = 1
a1 = 0
a2 = 0
return np.array([b0, b1, b2, a0, a1, a2])
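# Second-order (biquad) high-shelving EQ design; same coefficient layout as
# design_low_shelving_filter, but the boost/cut applies above the corner frequency.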
def design_high_shelving_filter(g_db, f, q, fs):
k = np.tan(np.pi * f / fs)
v0 = np.power(10, g_db / 20)
root2 = 1 / q
if v0 < 1:
v0 = 1 / v0
if g_db > 0:
b0 = (v0 + root2 * np.sqrt(v0) * k + k * k) / (1 + root2 * k + k * k)
b1 = (2 * (k * k - v0)) / (1 + root2 * k + k * k)
b2 = (v0 - root2 * np.sqrt(v0) * k + k * k) / (1 + root2 * k + k * k)
a0 = 1
a1 = (2 * (k * k - 1)) / (1 + root2 * k + k * k)
a2 = (1 - root2 * k + k * k) / (1 + root2 * k + k * k)
elif g_db < 0:
b0 = (1 + root2 * k + k * k) / (v0 + root2 * np.sqrt(v0) * k + k * k)
b1 = (2 * (k * k - 1)) / (v0 + root2 * np.sqrt(v0) * k + k * k)
b2 = (1 - root2 * k + k * k) / (v0 + root2 * np.sqrt(v0) * k + k * k)
a0 = 1
a1 = (2 * ((k * k) / v0 - 1)) / (1 + root2 / np.sqrt(v0) * k + (k * k) / v0)
a2 = (1 - root2 / np.sqrt(v0) * k + (k * k) / v0) / (1 + root2 / np.sqrt(v0) * k + (k * k) / v0)
else:
b0 = 1
b1 = 0
b2 = 0
a0 = 1
a1 = 0
a2 = 0
return np.array([b0, b1, b2, a0, a1, a2])
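# Second-order (biquad) peaking EQ design: boost/cut of g_db centred at frequency f
# with bandwidth controlled by q.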
def design_peak_filter(g_db, f, q, fs):
    w_c = 2 * np.pi * f / fs
mu = np.power(10, g_db / 20)
k_q = 4 / (1 + mu) * np.tan(w_c / (2 * q))
c_pk = (1 + k_q * mu) / (1 + k_q)
b0 = c_pk
b1 = c_pk * (-2 * np.cos(w_c) / (1 + k_q * mu))
b2 = c_pk * (1 - k_q * mu) / (1 + k_q * mu)
a0 = 1
a1 = -2 * np.cos(w_c) / (1 + k_q)
a2 = (1 - k_q) / (1 + k_q)
return np.array([b0, b1, b2, a0, a1, a2])
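# Recursive comb filter: y[n] = x[n] + gain * y[n - delay].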
def iir_comb(x, gain, delay):
# BL = 1, FB = gain, FF = 0
y = np.zeros(delay + x.shape[0])
for i in range(x.shape[0]):
y[i + delay] = x[i] + gain * y[i]
y = y[delay:]
return y
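# Feedback comb tapped at the delay-line output: y[n] = x[n - delay] + gain * y[n - delay].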
def feedback_comb(x, gain, delay):
# BL = 0, FB = gain, FF = 1
y = np.zeros(delay + x.shape[0])
for i in range(x.shape[0]):
y[i + delay] = x[i] + gain * y[i]
y = y[:x.shape[0]]
return y
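# Schroeder-style all-pass comb: v[n] = x[n] - gain * v[n - delay],
# y[n] = gain * v[n] + v[n - delay].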
def all_pass_comb(x, gain, delay):
# BL = gain, FB = -gain, FF = 1
delayed_s = np.zeros(delay + x.shape[0])
y = np.zeros_like(x)
for i in range(x.shape[0]):
delayed_s[i + delay] = x[i] + -gain * delayed_s[i]
y[i] = gain * delayed_s[i + delay] + delayed_s[i]
return y
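if __name__ == "__main__":
    # Illustrative usage (not part of the original module): apply a designed biquad
    # with scipy.signal.lfilter; SciPy availability is an assumption here.
    from scipy.signal import lfilter
    x = np.random.randn(44100)  # one second of white noise at 44.1 kHz
    b0, b1, b2, a0, a1, a2 = design_peak_filter(6.0, 1000.0, 1.0, 44100)
    y = lfilter([b0, b1, b2], [a0, a1, a2], x)
    print(y.shape)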
| [
"numpy.sqrt",
"numpy.tan",
"numpy.power",
"numpy.array",
"numpy.zeros",
"numpy.cos",
"numpy.zeros_like"
] | [((129, 151), 'numpy.tan', 'np.tan', (['(np.pi * f / fs)'], {}), '(np.pi * f / fs)\n', (135, 151), True, 'import numpy as np\n'), ((164, 191), 'numpy.power', 'np.power', (['(10.0)', '(g_db / 20.0)'], {}), '(10.0, g_db / 20.0)\n', (172, 191), True, 'import numpy as np\n'), ((1196, 1230), 'numpy.array', 'np.array', (['[b0, b1, b2, a0, a1, a2]'], {}), '([b0, b1, b2, a0, a1, a2])\n', (1204, 1230), True, 'import numpy as np\n'), ((1290, 1312), 'numpy.tan', 'np.tan', (['(np.pi * f / fs)'], {}), '(np.pi * f / fs)\n', (1296, 1312), True, 'import numpy as np\n'), ((1322, 1345), 'numpy.power', 'np.power', (['(10)', '(g_db / 20)'], {}), '(10, g_db / 20)\n', (1330, 1345), True, 'import numpy as np\n'), ((2331, 2365), 'numpy.array', 'np.array', (['[b0, b1, b2, a0, a1, a2]'], {}), '([b0, b1, b2, a0, a1, a2])\n', (2339, 2365), True, 'import numpy as np\n'), ((2449, 2472), 'numpy.power', 'np.power', (['(10)', '(g_db / 20)'], {}), '(10, g_db / 20)\n', (2457, 2472), True, 'import numpy as np\n'), ((2765, 2799), 'numpy.array', 'np.array', (['[b0, b1, b2, a0, a1, a2]'], {}), '([b0, b1, b2, a0, a1, a2])\n', (2773, 2799), True, 'import numpy as np\n'), ((2872, 2900), 'numpy.zeros', 'np.zeros', (['(delay + x.shape[0])'], {}), '(delay + x.shape[0])\n', (2880, 2900), True, 'import numpy as np\n'), ((3086, 3114), 'numpy.zeros', 'np.zeros', (['(delay + x.shape[0])'], {}), '(delay + x.shape[0])\n', (3094, 3114), True, 'import numpy as np\n'), ((3317, 3345), 'numpy.zeros', 'np.zeros', (['(delay + x.shape[0])'], {}), '(delay + x.shape[0])\n', (3325, 3345), True, 'import numpy as np\n'), ((3354, 3370), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (3367, 3370), True, 'import numpy as np\n'), ((2498, 2519), 'numpy.tan', 'np.tan', (['(w_c / (2 * q))'], {}), '(w_c / (2 * q))\n', (2504, 2519), True, 'import numpy as np\n'), ((2698, 2709), 'numpy.cos', 'np.cos', (['w_c'], {}), '(w_c)\n', (2704, 2709), True, 'import numpy as np\n'), ((2595, 2606), 'numpy.cos', 'np.cos', (['w_c'], {}), '(w_c)\n', (2601, 2606), True, 'import numpy as np\n'), ((285, 296), 'numpy.sqrt', 'np.sqrt', (['v0'], {}), '(v0)\n', (292, 296), True, 'import numpy as np\n'), ((429, 440), 'numpy.sqrt', 'np.sqrt', (['v0'], {}), '(v0)\n', (436, 440), True, 'import numpy as np\n'), ((1445, 1456), 'numpy.sqrt', 'np.sqrt', (['v0'], {}), '(v0)\n', (1452, 1456), True, 'import numpy as np\n'), ((1581, 1592), 'numpy.sqrt', 'np.sqrt', (['v0'], {}), '(v0)\n', (1588, 1592), True, 'import numpy as np\n'), ((699, 710), 'numpy.sqrt', 'np.sqrt', (['v0'], {}), '(v0)\n', (706, 710), True, 'import numpy as np\n'), ((775, 786), 'numpy.sqrt', 'np.sqrt', (['v0'], {}), '(v0)\n', (782, 786), True, 'import numpy as np\n'), ((857, 868), 'numpy.sqrt', 'np.sqrt', (['v0'], {}), '(v0)\n', (864, 868), True, 'import numpy as np\n'), ((953, 964), 'numpy.sqrt', 'np.sqrt', (['v0'], {}), '(v0)\n', (960, 964), True, 'import numpy as np\n'), ((1009, 1020), 'numpy.sqrt', 'np.sqrt', (['v0'], {}), '(v0)\n', (1016, 1020), True, 'import numpy as np\n'), ((1054, 1065), 'numpy.sqrt', 'np.sqrt', (['v0'], {}), '(v0)\n', (1061, 1065), True, 'import numpy as np\n'), ((1839, 1850), 'numpy.sqrt', 'np.sqrt', (['v0'], {}), '(v0)\n', (1846, 1850), True, 'import numpy as np\n'), ((1911, 1922), 'numpy.sqrt', 'np.sqrt', (['v0'], {}), '(v0)\n', (1918, 1922), True, 'import numpy as np\n'), ((1989, 2000), 'numpy.sqrt', 'np.sqrt', (['v0'], {}), '(v0)\n', (1996, 2000), True, 'import numpy as np\n'), ((2082, 2093), 'numpy.sqrt', 'np.sqrt', (['v0'], {}), '(v0)\n', (2089, 2093), True, 'import numpy as 
np\n'), ((2140, 2151), 'numpy.sqrt', 'np.sqrt', (['v0'], {}), '(v0)\n', (2147, 2151), True, 'import numpy as np\n'), ((2187, 2198), 'numpy.sqrt', 'np.sqrt', (['v0'], {}), '(v0)\n', (2194, 2198), True, 'import numpy as np\n')] |
"""MIT - CSAIL - Gifford Lab - seqgra
PyTorch DataSet class
@author: <NAME>
"""
from collections import deque
import random
from typing import Any, Deque, List, Tuple
import torch
import numpy as np
from seqgra.learner import Learner
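# In-memory dataset for multi-class models; one-hot boolean labels are converted to
# integer class indices via argmax.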
class MultiClassDataSet(torch.utils.data.Dataset):
def __init__(self, x, y=None):
self.x = x
self.y = y
self.x = np.array(self.x).astype(np.float32)
if self.y is not None:
if not isinstance(self.y, np.ndarray):
self.y = np.array(self.y)
if self.y.dtype == np.bool:
self.y = np.argmax(self.y.astype(np.int64), axis=1)
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
if self.y is None:
return self.x[idx]
else:
return self.x[idx], self.y[idx]
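# In-memory dataset for multi-label models; boolean label matrices are cast to float32.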
class MultiLabelDataSet(torch.utils.data.Dataset):
def __init__(self, x, y=None):
self.x = x
self.y = y
self.x = np.array(self.x).astype(np.float32)
if self.y is not None:
if not isinstance(self.y, np.ndarray):
self.y = np.array(self.y)
if self.y.dtype == np.bool:
self.y = self.y.astype(np.float32)
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
if self.y is None:
return self.x[idx]
else:
return self.x[idx], self.y[idx]
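# Streaming dataset for multi-class models: reads a tab-separated example file in
# chunks of cache_size lines, encodes each chunk with the learner and yields encoded
# examples one at a time.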
class IterableMultiClassDataSet(torch.utils.data.IterableDataset):
def __init__(self, file_name: str, learner: Learner, shuffle: bool = False,
contains_y: bool = True, cache_size: int = 10000):
self.file_name: str = file_name
self.learner: Learner = learner
self.shuffle: bool = shuffle
self.contains_y: bool = contains_y
self.cache_size: int = cache_size
self.x_cache = None
self.y_cache = None
self.cache_index: int = cache_size
def __iter__(self):
with open(self.file_name, "r") as f:
# skip header
next(f)
x, y = self._get_next_example(f)
while x is not None:
if self.contains_y:
yield x, y
else:
yield x
x, y = self._get_next_example(f)
def _get_next_example(self, file_handle) -> Tuple[Any, Any]:
if self.x_cache is None or self.cache_index >= self.x_cache.shape[0]:
# read next chunk in memory
x_vec: List[str] = list()
y_vec: List[str] = list()
line: str = file_handle.readline()
i = 1
while line and i <= self.cache_size:
cells: List[str] = line.split("\t")
if len(cells) == 2 or (len(cells) == 1 and not self.contains_y):
x_vec.append(cells[0].strip())
if self.contains_y:
y_vec.append(cells[1].strip())
else:
raise Exception("invalid example: " + line)
line = file_handle.readline()
i += 1
if x_vec:
# validate data
if self.learner.validate_data:
self.learner.check_sequence(x_vec)
if self.contains_y:
self.learner.check_labels(y_vec)
# shuffle
if self.shuffle:
if self.contains_y:
temp = list(zip(x_vec, y_vec))
random.shuffle(temp)
x_vec, y_vec = zip(*temp)
else:
random.shuffle(x_vec)
# process chunk in memory
encoded_x_vec = self.learner.encode_x(x_vec)
if not isinstance(encoded_x_vec, np.ndarray):
encoded_x_vec = np.array(encoded_x_vec)
self.x_cache = encoded_x_vec.astype(np.float32)
if self.contains_y:
encoded_y_vec = self.learner.encode_y(y_vec)
if not isinstance(encoded_y_vec, np.ndarray):
encoded_y_vec = np.array(encoded_y_vec)
self.y_cache = np.argmax(encoded_y_vec.astype(np.int64),
axis=1)
self.cache_index = 0
if self.x_cache is not None and self.cache_index < self.x_cache.shape[0]:
if self.contains_y:
example = (self.x_cache[self.cache_index, ...],
self.y_cache[self.cache_index])
else:
example = (self.x_cache[self.cache_index, ...], None)
self.cache_index += 1
return example
else:
return (None, None)
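# Streaming dataset for multi-label models; identical to IterableMultiClassDataSet
# except that encoded labels stay as float32 vectors instead of class indices.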
class IterableMultiLabelDataSet(torch.utils.data.IterableDataset):
def __init__(self, file_name: str, learner: Learner, shuffle: bool = False,
contains_y: bool = True, cache_size: int = 10000):
self.file_name: str = file_name
self.learner: Learner = learner
self.shuffle: bool = shuffle
self.contains_y: bool = contains_y
self.cache_size: int = cache_size
self.x_cache = None
self.y_cache = None
self.cache_index: int = cache_size
def __iter__(self):
with open(self.file_name, "r") as f:
# skip header
next(f)
x, y = self._get_next_example(f)
while x is not None:
if self.contains_y:
yield x, y
else:
yield x
x, y = self._get_next_example(f)
def _get_next_example(self, file_handle) -> Tuple[Any, Any]:
if self.x_cache is None or self.cache_index >= self.x_cache.shape[0]:
# read next chunk in memory
x_vec: List[str] = list()
y_vec: List[str] = list()
line: str = file_handle.readline()
i = 1
while line and i <= self.cache_size:
cells: List[str] = line.split("\t")
if len(cells) == 2 or (len(cells) == 1 and not self.contains_y):
x_vec.append(cells[0].strip())
if self.contains_y:
y_vec.append(cells[1].strip())
else:
raise Exception("invalid example: " + line)
line = file_handle.readline()
i += 1
if x_vec:
# validate data
if self.learner.validate_data:
self.learner.check_sequence(x_vec)
if self.contains_y:
self.learner.check_labels(y_vec)
# shuffle
if self.shuffle:
if self.contains_y:
temp = list(zip(x_vec, y_vec))
random.shuffle(temp)
x_vec, y_vec = zip(*temp)
else:
random.shuffle(x_vec)
# process chunk in memory
encoded_x_vec = self.learner.encode_x(x_vec)
if not isinstance(encoded_x_vec, np.ndarray):
encoded_x_vec = np.array(encoded_x_vec)
self.x_cache = encoded_x_vec.astype(np.float32)
if self.contains_y:
encoded_y_vec = self.learner.encode_y(y_vec)
if not isinstance(encoded_y_vec, np.ndarray):
encoded_y_vec = np.array(encoded_y_vec)
self.y_cache = encoded_y_vec.astype(np.float32)
self.cache_index = 0
if self.x_cache is not None and self.cache_index < self.x_cache.shape[0]:
if self.contains_y:
example = (self.x_cache[self.cache_index, ...],
self.y_cache[self.cache_index])
else:
example = (self.x_cache[self.cache_index, ...], None)
self.cache_index += 1
return example
else:
return (None, None)
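# Illustrative usage (not part of the original module): these iterable datasets work
# with a standard torch DataLoader; "train.txt" and `learner` below are placeholders
# for a real example file and a concrete Learner instance.
# loader = torch.utils.data.DataLoader(
#     IterableMultiClassDataSet("train.txt", learner), batch_size=32)
# for x_batch, y_batch in loader:
#     ...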
| [
"torch.is_tensor",
"numpy.array",
"random.shuffle"
] | [((747, 767), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (762, 767), False, 'import torch\n'), ((1409, 1429), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (1424, 1429), False, 'import torch\n'), ((382, 398), 'numpy.array', 'np.array', (['self.x'], {}), '(self.x)\n', (390, 398), True, 'import numpy as np\n'), ((526, 542), 'numpy.array', 'np.array', (['self.y'], {}), '(self.y)\n', (534, 542), True, 'import numpy as np\n'), ((1061, 1077), 'numpy.array', 'np.array', (['self.x'], {}), '(self.x)\n', (1069, 1077), True, 'import numpy as np\n'), ((1205, 1221), 'numpy.array', 'np.array', (['self.y'], {}), '(self.y)\n', (1213, 1221), True, 'import numpy as np\n'), ((4049, 4072), 'numpy.array', 'np.array', (['encoded_x_vec'], {}), '(encoded_x_vec)\n', (4057, 4072), True, 'import numpy as np\n'), ((7439, 7462), 'numpy.array', 'np.array', (['encoded_x_vec'], {}), '(encoded_x_vec)\n', (7447, 7462), True, 'import numpy as np\n'), ((3704, 3724), 'random.shuffle', 'random.shuffle', (['temp'], {}), '(temp)\n', (3718, 3724), False, 'import random\n'), ((3825, 3846), 'random.shuffle', 'random.shuffle', (['x_vec'], {}), '(x_vec)\n', (3839, 3846), False, 'import random\n'), ((4345, 4368), 'numpy.array', 'np.array', (['encoded_y_vec'], {}), '(encoded_y_vec)\n', (4353, 4368), True, 'import numpy as np\n'), ((7094, 7114), 'random.shuffle', 'random.shuffle', (['temp'], {}), '(temp)\n', (7108, 7114), False, 'import random\n'), ((7215, 7236), 'random.shuffle', 'random.shuffle', (['x_vec'], {}), '(x_vec)\n', (7229, 7236), False, 'import random\n'), ((7735, 7758), 'numpy.array', 'np.array', (['encoded_y_vec'], {}), '(encoded_y_vec)\n', (7743, 7758), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from datetime import timedelta, time, date
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.timezone import now, localtime
from base.models import User, UserProfile, ProfileRequest
from farnsworth import pre_fill
from managers.models import Manager
from utils.variables import MESSAGES
from workshift.fill import REGULAR_WORKSHIFTS, WEEK_LONG, HUMOR_WORKSHIFTS, \
BATHROOM_WORKSHIFTS
from workshift.models import *
from workshift.forms import *
from workshift.fields import DAY_CHOICES
from workshift.cron import CollectBlownCronJob, UpdateWeeklyStandings
from workshift import utils, signals
class TestStart(TestCase):
"""
Tests the behavior of the workshift website before any semester has been
initialized. Also tests that initializing a semester works correctly.
"""
def setUp(self):
self.u = User.objects.create_user(username="u", password="<PASSWORD>")
self.wu = User.objects.create_user(username="wu", password="<PASSWORD>")
self.wu.first_name, self.wu.last_name = "Cooperative", "User"
self.wu.save()
self.wm = Manager.objects.create(
title="Workshift Manager",
incumbent=UserProfile.objects.get(user=self.wu),
workshift_manager=True,
)
self.assertTrue(self.client.login(username="wu", password="<PASSWORD>"))
def test_unauthenticated(self):
self.client.logout()
url = reverse("workshift:view_semester")
response = self.client.get(url, follow=True)
self.assertRedirects(response, reverse("login") + "?next=" + url)
def test_before(self):
url = reverse("workshift:view_semester")
response = self.client.get(url, follow=True)
self.assertRedirects(response, reverse("workshift:start_semester"))
self.client.logout()
self.assertTrue(self.client.login(username="u", password="pwd"))
response = self.client.get(url, follow=True)
self.assertRedirects(response, reverse("homepage"))
def test_start(self):
url = reverse("workshift:start_semester")
response = self.client.post(url, {
"semester-season": Semester.SUMMER,
"semester-year": 2014,
"semester-rate": 13.30,
"semester-policy": "http://bsc.coop",
"semester-start_date": date(2014, 5, 22),
"semester-end_date": date(2014, 8, 15),
}, follow=True)
self.assertRedirects(response, reverse("workshift:manage"))
self.assertEqual(
1,
Semester.objects.filter(year=2014).filter(season=Semester.SUMMER).count(),
)
semester = Semester.objects.get(year=2014, season=Semester.SUMMER)
self.assertEqual(
2,
WorkshiftProfile.objects.filter(semester=semester).count(),
)
self.assertEqual(
1,
WorkshiftPool.objects.filter(semester=semester).count(),
)
pool = WorkshiftPool.objects.get(semester=semester)
self.assertEqual(PoolHours.objects.filter(pool=pool).count(), 2)
pool_hours = PoolHours.objects.filter(pool=pool)
for profile in WorkshiftProfile.objects.filter(semester=semester):
self.assertEqual(1, profile.pool_hours.count())
self.assertIn(profile.pool_hours.all()[0], pool_hours)
self.assertEqual(1, profile.pool_hours.filter(pool=pool).count())
class TestAssignment(TestCase):
"""
Test the functionality of workshift.utils.auto_assign_shifts. This should
    include respecting members' shift preferences and schedules.
"""
def setUp(self):
self.u = User.objects.create_user(username="u0")
today = localtime(now()).date()
self.semester = Semester.objects.create(
year=today.year,
start_date=today,
end_date=today + timedelta(days=6),
)
self.profile = WorkshiftProfile.objects.get(
user=self.u,
semester=self.semester,
)
self.p1 = WorkshiftPool.objects.get(
is_primary=True,
semester=self.semester,
)
self.p2 = WorkshiftPool.objects.create(
title="Alternate Workshift",
semester=self.semester,
)
self.wtype1 = WorkshiftType.objects.create(
title="Like Type",
)
self.wtype2 = WorkshiftType.objects.create(
title="Indifferent Type",
)
self.wtype3 = WorkshiftType.objects.create(
title="Dislike Type",
)
preference1 = WorkshiftRating.objects.create(
rating=WorkshiftRating.LIKE,
workshift_type=self.wtype1,
)
preference2 = WorkshiftRating.objects.create(
rating=WorkshiftRating.INDIFFERENT,
workshift_type=self.wtype2,
)
preference3 = WorkshiftRating.objects.create(
rating=WorkshiftRating.DISLIKE,
workshift_type=self.wtype3,
)
self.profile.ratings = [preference1, preference2, preference3]
self.profile.save()
utils.make_workshift_pool_hours(semester=self.semester)
def test_auto_assign_one(self):
"""
Assign one shift to a member.
"""
shift1 = RegularWorkshift.objects.create(
workshift_type=self.wtype1,
pool=self.p1,
hours=5,
)
unfinished = utils.auto_assign_shifts(self.semester)
self.assertEqual([], unfinished)
self.assertIn(self.profile, shift1.current_assignees.all())
instances = WorkshiftInstance.objects.filter(weekly_workshift=shift1)
self.assertGreater(instances.count(), 0)
self.assertTrue(all(
instance.workshifter == self.profile
for instance in instances
))
pool_hours = self.profile.pool_hours.get(pool=self.p1)
self.assertEqual(
pool_hours.assigned_hours,
pool_hours.hours,
)
def test_pre_assigned(self):
"""
Test that assignment behaves correctly when members are already assigned
to other workshifts.
"""
shift1 = RegularWorkshift.objects.create(
workshift_type=self.wtype1,
pool=self.p1,
hours=5,
)
shift2 = RegularWorkshift.objects.create(
workshift_type=self.wtype3,
pool=self.p1,
hours=1,
)
shift2.current_assignees = [self.profile]
unfinished = utils.auto_assign_shifts(self.semester)
self.assertEqual([self.profile], unfinished)
self.assertNotIn(self.profile, shift1.current_assignees.all())
pool_hours = self.profile.pool_hours.get(pool=self.p1)
self.assertEqual(
pool_hours.assigned_hours,
1,
)
def test_auto_assign_one_overflow(self):
"""
Don't assign one shift to a member because it pushes them over their
weekly requirement.
"""
shift1 = RegularWorkshift.objects.create(
workshift_type=self.wtype1,
pool=self.p1,
hours=6,
)
unfinished = utils.auto_assign_shifts(self.semester)
self.assertEqual([self.profile], unfinished)
self.assertNotIn(self.profile, shift1.current_assignees.all())
instances = WorkshiftInstance.objects.filter(weekly_workshift=shift1)
self.assertGreater(instances.count(), 0)
self.assertTrue(all(
            instance.workshifter is None
for instance in instances
))
pool_hours = self.profile.pool_hours.get(pool=self.p1)
self.assertEqual(
pool_hours.assigned_hours,
0,
)
def test_auto_assign_two(self):
"""
Assign two shifts to a member.
"""
shift1 = RegularWorkshift.objects.create(
workshift_type=self.wtype1,
pool=self.p1,
hours=2,
)
shift2 = RegularWorkshift.objects.create(
workshift_type=self.wtype1,
pool=self.p1,
hours=3,
)
unfinished = utils.auto_assign_shifts(self.semester)
self.assertEqual([], unfinished)
self.assertIn(self.profile, shift1.current_assignees.all())
self.assertIn(self.profile, shift2.current_assignees.all())
for shift in [shift1, shift2]:
instances = WorkshiftInstance.objects.filter(weekly_workshift=shift)
self.assertGreater(instances.count(), 0)
self.assertTrue(all(
instance.workshifter == self.profile
for instance in instances
))
pool_hours = self.profile.pool_hours.get(pool=self.p1)
self.assertEqual(
pool_hours.assigned_hours,
pool_hours.hours,
)
def test_auto_assign_two_preferred(self):
"""
Assign one of two shifts to a member.
"""
shift1 = RegularWorkshift.objects.create(
workshift_type=self.wtype1,
pool=self.p1,
hours=5,
)
shift2 = RegularWorkshift.objects.create(
workshift_type=self.wtype2,
pool=self.p1,
hours=5,
)
unfinished = utils.auto_assign_shifts(self.semester)
self.assertEqual([], unfinished)
self.assertIn(self.profile, shift1.current_assignees.all())
self.assertNotIn(self.profile, shift2.current_assignees.all())
instances = WorkshiftInstance.objects.filter(weekly_workshift=shift1)
self.assertGreater(instances.count(), 0)
self.assertTrue(all(
instance.workshifter == self.profile
for instance in instances
))
instances = WorkshiftInstance.objects.filter(weekly_workshift=shift2)
self.assertGreater(instances.count(), 0)
self.assertTrue(all(
            instance.workshifter is None
for instance in instances
))
pool_hours = self.profile.pool_hours.get(pool=self.p1)
self.assertEqual(
pool_hours.assigned_hours,
pool_hours.hours,
)
def test_auto_assign_two_overflow(self):
"""
Assign a preferred shift to a member, but don't assign the other because
it pushes them over their weekly requirement.
"""
shift1 = RegularWorkshift.objects.create(
workshift_type=self.wtype1,
pool=self.p1,
hours=3,
)
shift2 = RegularWorkshift.objects.create(
workshift_type=self.wtype2,
pool=self.p1,
hours=3,
)
unfinished = utils.auto_assign_shifts(self.semester)
self.assertEqual([self.profile], unfinished)
self.assertIn(self.profile, shift1.current_assignees.all())
self.assertNotIn(self.profile, shift2.current_assignees.all())
instances = WorkshiftInstance.objects.filter(weekly_workshift=shift1)
self.assertGreater(instances.count(), 0)
self.assertTrue(all(
instance.workshifter == self.profile
for instance in instances
))
instances = WorkshiftInstance.objects.filter(weekly_workshift=shift2)
self.assertGreater(instances.count(), 0)
self.assertTrue(all(
            instance.workshifter is None
for instance in instances
))
pool_hours = self.profile.pool_hours.get(pool=self.p1)
self.assertEqual(
pool_hours.assigned_hours,
3,
)
def _test_auto_assign_fifty(self):
"""
Assign fifty members to fifty shifts, with each shift providing 5 hours
of workshift. Ensures that the assignments don't mysteriously break or
run for an extremely long time for medium-sized houses.
"""
shifts = []
for i in range(50):
shifts.append(
RegularWorkshift.objects.create(
workshift_type=self.wtype1,
pool=self.p1,
hours=5,
)
)
for i in range(1, 50):
user = User.objects.create_user(username="u{0}".format(i))
utils.make_workshift_pool_hours(semester=self.semester)
unfinished = utils.auto_assign_shifts(self.semester)
self.assertEqual([], unfinished)
for shift in shifts:
self.assertEqual(1, shift.current_assignees.count())
def _test_auto_assign_one_hundred_and_fifty(self):
"""
Assign 150 members to 150 shifts, with each shift providing 5 hours
of workshift. Ensures that the assignments don't mysteriously break or
run for an extremely long time for large houses.
"""
shifts = []
for i in range(150):
shifts.append(
RegularWorkshift.objects.create(
workshift_type=self.wtype1,
pool=self.p1,
hours=5,
)
)
for i in range(1, 150):
user = User.objects.create_user(username="u{0}".format(i))
utils.make_workshift_pool_hours(semester=self.semester)
unfinished = utils.auto_assign_shifts(self.semester)
self.assertEqual([], unfinished)
for shift in shifts:
self.assertEqual(1, shift.current_assignees.count())
def test_pre_fill_and_assign(self):
"""
Tests that shifts can be correctly assigned after farnsworth/pre_fill.py
is run. This is a good test of how the assignment code functions "in the
wild," rather than with many duplicates of the same shift.
"""
users = []
for i in range(1, 50):
users.append(User.objects.create_user(username="u{0}".format(i)))
pre_fill.main(["--managers", "--workshift"])
utils.make_workshift_pool_hours(semester=self.semester)
# Assign manager shifts beforehand
for user, manager in zip(users, Manager.objects.all()):
manager.incumbent = UserProfile.objects.get(user=user)
manager.save()
unfinished = utils.auto_assign_shifts(self.semester)
self.assertEqual([], unfinished)
def _test_pre_fill_and_assign_humor(self):
"""
Tests that humor shifts can be correctly assigned after
farnsworth/pre_fill.py is run.
"""
for i in range(1, 50):
user = User.objects.create_user(username="u{0}".format(i))
pre_fill.main(["--managers", "--workshift"])
utils.make_workshift_pool_hours(semester=self.semester)
# Assign manager shifts beforehand
manager_shifts = RegularWorkshift.objects.filter(
pool=self.p1, workshift_type__auto_assign=False,
)
profiles = WorkshiftProfile.objects.all()
for profile, shift in zip(profiles, manager_shifts):
shift.current_assignees.add(profile)
shift.save()
unfinished = utils.auto_assign_shifts(
self.semester, pool=WorkshiftPool.objects.get(title="Humor Shift")
)
self.assertEqual([], unfinished)
class TestUtils(TestCase):
"""
Tests most of the various functions within workshift.utils.
"""
def setUp(self):
self.u = User.objects.create_user(username="u", first_name="N", last_name="M")
today = localtime(now()).date()
self.semester = Semester.objects.create(
year=today.year,
season=Semester.SUMMER,
start_date=today,
end_date=today + timedelta(weeks=18),
)
self.profile = WorkshiftProfile.objects.get(user=self.u)
self.p1 = WorkshiftPool.objects.get(
is_primary=True,
semester=self.semester,
)
self.p1.sign_out_cutoff = 24
self.p1.verify_cutoff = 2
self.p1.save()
self.p2 = WorkshiftPool.objects.create(
title="Alternate Workshift",
semester=self.semester,
)
def test_cron_blown(self):
CollectBlownCronJob().do()
def test_cron_standings(self):
UpdateWeeklyStandings().do()
def test_get_year_season(self):
year, season = utils.get_year_season()
self.assertLess(abs(year - localtime(now()).date().year), 2)
self.assertIn(season, [Semester.SPRING, Semester.SUMMER, Semester.FALL])
def test_starting_month(self):
# Starting in Summer, Fall, and Spring
self.assertEqual(
(2015, Semester.SPRING),
utils.get_year_season(day=date(2014, 12, 20)),
)
self.assertEqual(
(2015, Semester.SPRING),
utils.get_year_season(day=date(2015, 3, 20)),
)
self.assertEqual(
(2014, Semester.SUMMER),
utils.get_year_season(day=date(2014, 4, 1)),
)
self.assertEqual(
(2014, Semester.SUMMER),
utils.get_year_season(day=date(2014, 7, 20)),
)
self.assertEqual(
(2014, Semester.FALL),
utils.get_year_season(day=date(2014, 8, 1)),
)
self.assertEqual(
(2014, Semester.FALL),
utils.get_year_season(day=date(2014, 10, 20)),
)
def test_start_end(self):
self.assertEqual(
(date(2014, 1, 20), date(2014, 5, 17)),
utils.get_semester_start_end(2014, Semester.SPRING),
)
self.assertEqual(
(date(2014, 5, 25), date(2014, 8, 16)),
utils.get_semester_start_end(2014, Semester.SUMMER),
)
self.assertEqual(
(date(2014, 8, 24), date(2014, 12, 20)),
utils.get_semester_start_end(2014, Semester.FALL),
)
def test_make_pool_hours_all(self):
PoolHours.objects.all().delete()
utils.make_workshift_pool_hours()
self.assertEqual(2, PoolHours.objects.count())
self.assertEqual(2, self.profile.pool_hours.count())
def test_make_pool_hours_profile(self):
PoolHours.objects.all().delete()
utils.make_workshift_pool_hours(
semester=self.semester,
profiles=[],
)
self.assertEqual(0, PoolHours.objects.count())
self.assertEqual(0, self.profile.pool_hours.count())
utils.make_workshift_pool_hours(
semester=self.semester,
profiles=[self.profile],
)
self.assertEqual(2, PoolHours.objects.count())
self.assertEqual(2, self.profile.pool_hours.count())
def test_make_pool_hours_pools(self):
PoolHours.objects.all().delete()
utils.make_workshift_pool_hours(
semester=self.semester,
pools=[self.p1],
)
self.assertEqual(1, PoolHours.objects.count())
self.assertEqual(1, self.profile.pool_hours.count())
utils.make_workshift_pool_hours(
semester=self.semester,
pools=[self.p2],
)
self.assertEqual(2, PoolHours.objects.count())
self.assertEqual(2, self.profile.pool_hours.count())
def test_make_pool_hours_primary(self):
PoolHours.objects.all().delete()
utils.make_workshift_pool_hours(
semester=self.semester,
primary_hours=6,
)
self.assertEqual(6, PoolHours.objects.get(pool=self.p1).hours)
self.assertEqual(self.p2.hours, PoolHours.objects.get(pool=self.p2).hours)
def test_can_manage(self):
pass
def test_is_available(self):
pass
def test_make_instances(self):
wtype = WorkshiftType.objects.create(
title="Test Make Instances",
)
# Disconnect the handler and run make_instances ourselves
models.signals.post_save.disconnect(
signals.create_workshift_instances, sender=RegularWorkshift
)
shift = RegularWorkshift.objects.create(
workshift_type=wtype,
pool=self.p1,
day=4,
hours=7,
)
shift.current_assignees = [self.profile]
today = localtime(now()).date()
WorkshiftInstance.objects.create(
weekly_workshift=shift,
date=today - timedelta(today.weekday()),
)
instances = utils.make_instances(
semester=self.semester,
shifts=[shift],
)
models.signals.post_save.connect(
signals.create_workshift_instances, sender=RegularWorkshift
)
for instance in instances:
self.assertEqual(wtype.title, instance.title)
self.assertEqual(shift, instance.weekly_workshift)
self.assertEqual(shift.hours, instance.hours)
self.assertEqual(shift.hours, instance.intended_hours)
self.assertEqual(1, instance.logs.count())
self.assertEqual(
set([shift.day]),
set(i.date.weekday() for i in instances),
)
def test_collect_blown(self):
utils.make_workshift_pool_hours()
self.assertEqual(
([], [], []),
utils.collect_blown(),
)
self.assertEqual(
([], [], []),
utils.collect_blown(semester=self.semester),
)
moment = localtime(now().replace(hour=20, minute=0, second=0, microsecond=0))
past = moment - timedelta(days=1)
WorkshiftInstance.objects.create(
info=InstanceInfo.objects.create(
title="Closed",
pool=self.p1,
),
closed=True,
date=past.date(),
semester=self.semester,
)
to_close = WorkshiftInstance.objects.create(
info=InstanceInfo.objects.create(
title="To be closed",
pool=self.p1,
),
date=past.date(),
semester=self.semester,
)
WorkshiftInstance.objects.create(
info=InstanceInfo.objects.create(
title="Not Blown",
pool=self.p1,
),
date=moment.date(),
semester=self.semester,
)
blown = WorkshiftInstance.objects.create(
info=InstanceInfo.objects.create(
title="Blown",
pool=self.p1,
),
date=past.date(),
workshifter=self.profile,
semester=self.semester,
)
WorkshiftInstance.objects.create(
info=InstanceInfo.objects.create(
title="Edge Case 1: Not Closed",
end_time=moment.time(),
pool=self.p1,
),
date=moment.date(),
semester=self.semester,
)
edge_datetime = moment - timedelta(hours=self.p1.verify_cutoff, minutes=1)
edge_case_2 = WorkshiftInstance.objects.create(
info=InstanceInfo.objects.create(
title="Edge Case 2: Closed",
end_time=edge_datetime.time(),
pool=self.p1,
),
date=edge_datetime.date(),
)
signed_out_1 = WorkshiftInstance.objects.create(
info=InstanceInfo.objects.create(
title="Workshifter signed out early enough",
pool=self.p1,
),
date=past.date(),
semester=self.semester,
)
signed_out_2 = WorkshiftInstance.objects.create(
info=InstanceInfo.objects.create(
title="Workshifter signed out too late",
pool=self.p1,
),
liable=self.profile,
date=past.date(),
semester=self.semester,
)
self.assertEqual(
([to_close, edge_case_2, signed_out_1], [], [blown, signed_out_2]),
utils.collect_blown(moment=moment),
)
class TestViews(TestCase):
"""
    Tests a few basic things about the application: that all the pages can load
correctly, and that they contain the content that is expected.
"""
def setUp(self):
moment = localtime(now())
today = moment.date()
self.sem = Semester.objects.create(
year=today.year,
start_date=today,
end_date=today + timedelta(days=6),
)
self.u = User.objects.create_user(username="u", password="<PASSWORD>")
self.wu = User.objects.create_user(username="wu", password="<PASSWORD>")
self.wu.first_name, self.wu.last_name = "Cooperative", "User"
self.wu.save()
self.wm = Manager.objects.create(
title="Workshift Manager",
incumbent=UserProfile.objects.get(user=self.wu),
workshift_manager=True,
)
self.pool = WorkshiftPool.objects.get(
semester=self.sem,
is_primary=True,
)
self.wprofile = WorkshiftProfile.objects.get(user=self.wu)
self.wtype = WorkshiftType.objects.create(
title="Test Posts",
description="Test WorkshiftType Description",
quick_tips="Test Quick Tips",
)
self.shift = RegularWorkshift.objects.create(
workshift_type=self.wtype,
pool=self.pool,
day=today.weekday(),
start_time=moment,
end_time=moment + timedelta(hours=2),
)
self.shift.current_assignees = [self.wprofile]
self.instance = WorkshiftInstance.objects.get(
weekly_workshift=self.shift,
)
info = InstanceInfo.objects.create(
title="Test One Time Shift",
pool=self.pool,
description="One Time Description",
)
self.once = WorkshiftInstance.objects.create(
info=info,
date=today + timedelta(days=7),
workshifter=self.wprofile,
)
self.sle0 = ShiftLogEntry.objects.create(
person=self.wprofile,
note="Test Shift Log #0",
entry_type=ShiftLogEntry.ASSIGNED,
)
self.sle1 = ShiftLogEntry.objects.create(
person=self.wprofile,
note="Test Shift Log #1",
entry_type=ShiftLogEntry.SIGNOUT,
)
self.sle2 = ShiftLogEntry.objects.create(
person=self.wprofile,
note="Test Shift Log #2",
entry_type=ShiftLogEntry.SIGNIN,
)
self.sle3 = ShiftLogEntry.objects.create(
person=self.wprofile,
note="Test Shift Log #3",
entry_type=ShiftLogEntry.VERIFY,
)
self.sle4 = ShiftLogEntry.objects.create(
person=self.wprofile,
note="Test Shift Log #4",
entry_type=ShiftLogEntry.BLOWN,
)
self.once.logs = [self.sle0, self.sle1, self.sle2, self.sle3, self.sle4]
self.instance.logs = [self.sle0, self.sle1, self.sle2, self.sle3, self.sle4]
hours = self.wprofile.pool_hours.get(pool=self.pool)
hours.first_fine_date = 13.00
hours.save()
self.assertTrue(self.client.login(username="wu", password="<PASSWORD>"))
def test_no_profile(self):
self.client.logout()
self.client.login(username='u', password='<PASSWORD>')
urls = [
reverse("workshift:list_types"),
reverse("workshift:view_type", kwargs={"pk": self.wtype.pk}),
reverse("workshift:view_semester"),
reverse("workshift:profile", kwargs={"targetUsername": self.wprofile.user.username}),
reverse("workshift:view_shift", kwargs={"pk": self.shift.pk}),
reverse("workshift:view_instance", kwargs={"pk": self.instance.pk}),
reverse("workshift:view_instance", kwargs={"pk": self.once.pk}),
reverse("workshift:view_open"),
reverse("workshift:semester_info"),
reverse("workshift:profiles"),
]
for url in urls:
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_views_load(self):
urls = [
reverse("workshift:start_semester"),
reverse("workshift:list_types"),
reverse("workshift:view_type", kwargs={"pk": self.wtype.pk}),
reverse("workshift:edit_type", kwargs={"pk": self.wtype.pk}),
]
for url in urls:
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
urls = [
("workshift:view_semester", {}),
("workshift:semester_info", {}),
("workshift:profile", {"targetUsername": self.wprofile.user.username}),
("workshift:edit_profile", {"targetUsername": self.wprofile.user.username}),
("workshift:preferences", {"targetUsername": self.wprofile.user.username}),
("workshift:manage", {}),
("workshift:fill_shifts", {}),
("workshift:assign_shifts", {}),
("workshift:add_shift", {}),
("workshift:adjust_hours", {}),
("workshift:add_workshifter", {}),
("workshift:view_shift", {"pk": self.shift.pk}),
("workshift:edit_shift", {"pk": self.shift.pk}),
("workshift:view_instance", {"pk": self.instance.pk}),
("workshift:edit_instance", {"pk": self.instance.pk}),
("workshift:view_instance", {"pk": self.once.pk}),
("workshift:edit_instance", {"pk": self.once.pk}),
("workshift:view_open", {}),
("workshift:profiles", {}),
("workshift:add_pool", {}),
("workshift:view_pool", {"pk": self.pool.pk}),
("workshift:edit_pool", {"pk": self.pool.pk}),
]
for name, kwargs in urls:
url = reverse(name, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
kwargs["sem_url"] = "{0}{1}".format(self.sem.season, self.sem.year)
url = reverse(name, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_type_list(self):
url = reverse("workshift:list_types")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.wtype.title)
self.assertContains(response, "Week long")
self.assertNotContains(response, self.wtype.quick_tips)
self.assertNotContains(response, self.wtype.description)
def test_type(self):
url = reverse("workshift:view_type", kwargs={"pk": self.wtype.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.wtype.title)
self.assertContains(response, self.wtype.quick_tips)
self.assertContains(response, self.wtype.description)
def test_type_edit(self):
url = reverse("workshift:edit_type", kwargs={"pk": self.wtype.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.wtype.title)
self.assertContains(response, self.wtype.quick_tips)
self.assertContains(response, self.wtype.description)
def test_shift(self):
url = reverse("workshift:view_shift", kwargs={"pk": self.shift.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.wtype.title)
self.assertContains(response, str(self.shift.hours))
self.assertContains(response, self.shift.workshift_type.quick_tips)
self.assertContains(response, self.shift.workshift_type.description)
for assignee in self.shift.current_assignees.all():
self.assertContains(response, assignee.user.get_full_name())
def test_edit_shift(self):
url = reverse("workshift:edit_shift", kwargs={"pk": self.shift.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.wtype.title)
self.assertContains(response, str(self.shift.hours))
for assignee in self.shift.current_assignees.all():
self.assertContains(response, assignee.user.get_full_name())
def test_instance(self):
url = reverse("workshift:view_instance", kwargs={"pk": self.instance.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
self.instance.weekly_workshift.workshift_type.title,
)
self.assertContains(
response,
self.instance.weekly_workshift.pool.title,
)
self.assertContains(response, self.instance.workshifter.user.get_full_name())
self.assertContains(response, str(self.instance.hours))
self.assertContains(response, self.sle0.note)
self.assertContains(response, self.sle1.note)
self.assertContains(response, self.sle2.note)
self.assertContains(response, self.sle3.note)
self.assertContains(response, self.sle4.note)
def test_edit_instance(self):
url = reverse("workshift:edit_instance", kwargs={"pk": self.instance.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
self.instance.weekly_workshift.workshift_type.title,
)
self.assertContains(
response,
self.instance.weekly_workshift.pool.title,
)
self.assertContains(
response,
str(self.instance.date),
)
self.assertContains(
response,
self.instance.workshifter.user.get_full_name(),
)
self.assertContains(
response,
str(self.instance.hours),
)
def test_one_time(self):
url = reverse("workshift:view_instance", kwargs={"pk": self.once.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.once.title)
self.assertContains(response, self.once.pool.title)
self.assertContains(response, self.once.description)
self.assertContains(response, str(self.once.hours))
self.assertContains(response, self.once.workshifter.user.get_full_name())
self.assertContains(response, self.sle0.note)
self.assertContains(response, self.sle1.note)
self.assertContains(response, self.sle2.note)
self.assertContains(response, self.sle3.note)
self.assertContains(response, self.sle4.note)
def test_edit_one_time(self):
url = reverse("workshift:edit_instance", kwargs={"pk": self.once.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.once.title)
self.assertContains(response, self.once.pool.title)
self.assertContains(response, self.once.description)
self.assertContains(response, self.once.hours)
self.assertContains(
response,
self.once.workshifter.user.get_full_name(),
)
def test_semester_view(self):
url = reverse("workshift:view_semester")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_semester_no_prev(self):
today = self.sem.start_date
yesterday = today - timedelta(days=1)
tomorrow = today + timedelta(days=1)
url = reverse("workshift:view_semester")
response = self.client.get(url + "?day=" + today.strftime("%F"))
self.assertEqual(response.status_code, 200)
self.assertContains(response, today.strftime("%A, %B"))
self.assertContains(response, today.strftime("%d, %Y"))
self.assertNotContains(response, "?day=" + yesterday.strftime("%F"))
self.assertContains(response, "?day=" + tomorrow.strftime("%F"))
def test_semester_no_next(self):
today = self.sem.end_date
yesterday = today - timedelta(days=1)
tomorrow = today + timedelta(days=1)
url = reverse("workshift:view_semester")
response = self.client.get(url + "?day=" + today.strftime("%F"))
self.assertEqual(response.status_code, 200)
self.assertContains(response, today.strftime("%A, %B"))
self.assertContains(response, today.strftime("%d, %Y"))
self.assertContains(response, "?day=" + yesterday.strftime("%F"))
self.assertNotContains(response, "?day=" + tomorrow.strftime("%F"))
def test_semester_bad_day(self):
url = reverse("workshift:view_semester")
response = self.client.get(url + "?day=2014")
self.assertEqual(response.status_code, 200)
response = self.client.get(url + "?day=abcd")
self.assertEqual(response.status_code, 200)
response = self.client.get(url + "?day=2014-20")
self.assertEqual(response.status_code, 200)
response = self.client.get(url + "?day=2014-100-100")
self.assertEqual(response.status_code, 200)
def test_auto_assign(self):
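        # Clear existing assignments first so auto-assignment has an unfilled shift to fill.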
self.test_clear_assignees()
url = reverse("workshift:assign_shifts")
response = self.client.post(url, {
"pool": self.pool.pk,
"auto_assign_shifts": "",
})
self.assertRedirects(response, url)
uprofile = WorkshiftProfile.objects.get(user=self.u)
self.assertEqual(
RegularWorkshift.objects.get(pk=self.shift.pk),
RegularWorkshift.objects.get(current_assignees=uprofile)
)
def test_random_assign(self):
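        # Unassign every instance and delete all other profiles so the random
        # assignment can only land on self.wprofile.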
for instance in WorkshiftInstance.objects.all():
instance.workshifter = None
instance.save()
WorkshiftProfile.objects.exclude(pk=self.wprofile.pk).delete()
url = reverse("workshift:assign_shifts")
response = self.client.post(url, {
"pool": self.pool.pk,
"random_assign_instances": "",
})
self.assertRedirects(response, url)
self.assertEqual(
1,
WorkshiftInstance.objects.filter(workshifter=self.wprofile).count()
)
def test_clear_assignees(self):
url = reverse("workshift:assign_shifts")
response = self.client.post(url, {
"pool": self.pool.pk,
"clear_assignments": "",
})
self.assertRedirects(response, url)
self.assertEqual(
1,
RegularWorkshift.objects.filter(current_assignees=self.wprofile).count()
)
self.assertEqual(
2, # self.instance, Workshift Manager
WorkshiftInstance.objects.filter(workshifter=self.wprofile).count(),
)
def test_fine_date(self):
url = reverse("workshift:fine_date")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
"workshifters_table",
)
self.assertContains(
response,
self.pool.title,
)
class TestPreferences(TestCase):
"""
Tests the various elements of the workshift preferences page.
"""
def setUp(self):
self.wu = User.objects.create_user(username="wu", password="<PASSWORD>")
self.wu.first_name, self.wu.last_name = "Cooperative", "User"
self.wu.save()
self.wm = Manager.objects.create(
title="Workshift Manager",
incumbent=UserProfile.objects.get(user=self.wu),
workshift_manager=True,
)
today = localtime(now()).date()
self.sem = Semester.objects.create(
year=today.year, start_date=today,
end_date=today + timedelta(days=7),
)
self.pool = WorkshiftPool.objects.get(
semester=self.sem,
)
self.wprofile = WorkshiftProfile.objects.get(user=self.wu)
self.w1 = WorkshiftType.objects.create(
title="Clean Pots",
description="Clean and sanitize all cooking materials in the dish room",
quick_tips="Use 6 tablets of quartz!",
)
self.w2 = WorkshiftType.objects.create(
title="Clean Dishes",
description="Clean and santize all eating materials in the dish room",
quick_tips="Make sure there is liquid for the sanitizer!",
)
self.w3 = WorkshiftType.objects.create(
title="Trash",
description="Take out the trash, everyone has to do this one.",
rateable=False,
)
self.assertTrue(self.client.login(username="wu", password="<PASSWORD>"))
self.url = reverse("workshift:preferences", kwargs={"targetUsername": self.wprofile.user.username})
def test_preferences_get(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.w1.title)
self.assertContains(response, self.w2.title)
self.assertContains(response, self.w1.description)
self.assertContains(response, self.w2.description)
self.assertContains(
response,
'name="time-TOTAL_FORMS" type="hidden" value="1"',
)
self.assertContains(
response,
'name="time-INITIAL_FORMS" type="hidden" value="0"',
)
self.assertContains(
response,
'name="time-MAX_NUM_FORMS" type="hidden" value="50"',
)
self.assertEqual(self.wprofile.ratings.count(), 0)
def test_preferences_post(self):
response = self.client.post(self.url, {
"rating-{}-rating".format(self.w1.pk): WorkshiftRating.LIKE,
"rating-{}-rating".format(self.w2.pk): WorkshiftRating.DISLIKE,
"time-0-preference": TimeBlock.BUSY,
"time-0-day": DAY_CHOICES[0][0], # Monday
"time-0-start_time": "8:00 AM",
"time-0-end_time": "5:00 PM",
"time-1-preference": TimeBlock.BUSY,
"time-1-day": DAY_CHOICES[-1][0], # Sunday
"time-1-start_time": "4:00 PM",
"time-1-end_time": "9:00 PM",
"time-2-preference": TimeBlock.PREFERRED,
"time-2-day": DAY_CHOICES[1][0], # Tuesday
"time-2-start_time": "6:00 PM",
"time-2-end_time": "10:00 PM",
"time-TOTAL_FORMS": 3,
"time-INITIAL_FORMS": 0,
"time-MAX_NUM_FORMS": 50,
"note-note": "Dishes are fun, pots are cathartic.",
}, follow=True)
self.assertRedirects(response, self.url)
self.assertContains(response, "Preferences saved.")
self.assertEqual(self.wprofile.ratings.count(), 2)
for rating, wtype, liked in zip(
self.wprofile.ratings.all(),
[self.w1, self.w2],
[WorkshiftRating.LIKE, WorkshiftRating.DISLIKE],
):
self.assertEqual(rating.workshift_type, wtype)
self.assertEqual(rating.rating, liked)
self.assertEqual(self.wprofile.time_blocks.count(), 3)
        for block, preference, day, start, end in zip(
self.wprofile.time_blocks.all(),
[TimeBlock.BUSY, TimeBlock.BUSY, TimeBlock.PREFERRED],
[DAY_CHOICES[0][0], DAY_CHOICES[-1][0], DAY_CHOICES[1][0]],
[time(8, 0, 0), time(16, 0, 0), time(18, 0, 0)],
[time(17, 0, 0), time(21, 0, 0), time(22, 0, 0)],
):
self.assertEqual(block.preference, preference)
self.assertEqual(block.day, day)
self.assertEqual(block.start_time, start)
self.assertEqual(block.end_time, end)
self.assertEqual(
"Dishes are fun, pots are cathartic.",
WorkshiftProfile.objects.get(user=self.wu).note,
)
def test_no_note(self):
response = self.client.post(self.url, {
"rating-{}-rating".format(self.w1.pk): WorkshiftRating.LIKE,
"rating-{}-rating".format(self.w2.pk): WorkshiftRating.DISLIKE,
"time-TOTAL_FORMS": 0,
"time-INITIAL_FORMS": 0,
"time-MAX_NUM_FORMS": 50,
}, follow=True)
self.assertRedirects(response, self.url)
self.assertContains(response, "Preferences saved.")
def test_preferences_after_add(self):
self.test_no_note()
self.assertEqual(self.wprofile.ratings.count(), 2)
w4 = WorkshiftType.objects.create(
title="Added late",
description="Workshift added after preferences entered",
)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, w4.title)
response = self.client.post(self.url, {
"rating-{}-rating".format(self.w1.pk): WorkshiftRating.LIKE,
"rating-{}-rating".format(self.w2.pk): WorkshiftRating.DISLIKE,
"rating-{}-rating".format(w4.pk): WorkshiftRating.LIKE,
"time-TOTAL_FORMS": 0,
"time-INITIAL_FORMS": 0,
"time-MAX_NUM_FORMS": 50,
}, follow=True)
self.assertRedirects(response, self.url)
self.assertContains(response, "Preferences saved.")
self.assertEqual(self.wprofile.ratings.count(), 3)
rating = self.wprofile.ratings.get(workshift_type=w4)
self.assertEqual(rating.rating, WorkshiftRating.LIKE)
def test_delete_rating(self):
"""
Ensure that users cannot delete their rating preferences.
"""
response = self.client.post(self.url, {
"rating-{}-rating".format(self.w1.pk): WorkshiftRating.LIKE,
"rating-{}-rating".format(self.w2.pk): WorkshiftRating.LIKE,
"time-TOTAL_FORMS": 0,
"time-INITIAL_FORMS": 0,
"time-MAX_NUM_FORMS": 50,
}, follow=True)
self.assertRedirects(response, self.url)
response = self.client.post(self.url, {
"rating-1-rating": WorkshiftRating.LIKE,
"time-TOTAL_FORMS": 0,
"time-INITIAL_FORMS": 0,
"time-MAX_NUM_FORMS": 50,
})
self.assertEqual(response.status_code, 200)
self.assertEqual(self.wprofile.ratings.count(), 2)
def test_add_rating(self):
"""
Ensure that users cannot add extra rating preferences.
"""
self.client.post(self.url, {
"rating-{}-rating".format(self.w1.pk): WorkshiftRating.LIKE,
"rating-{}-rating".format(self.w2.pk): WorkshiftRating.LIKE,
"rating-{}-rating".format(self.w3.pk + 1): WorkshiftRating.LIKE,
"time-TOTAL_FORMS": 0,
"time-INITIAL_FORMS": 0,
"time-MAX_NUM_FORMS": 50,
})
self.assertEqual(self.wprofile.ratings.count(), 2)
def test_unrateable(self):
"""
Ensure that users cannot rate unrateable shifts.
"""
self.client.post(self.url, {
"rating-{}-rating".format(self.w1.pk): WorkshiftRating.LIKE,
"rating-{}-rating".format(self.w2.pk): WorkshiftRating.LIKE,
"rating-{}-rating".format(self.w3.pk): WorkshiftRating.LIKE,
"time-TOTAL_FORMS": 0,
"time-INITIAL_FORMS": 0,
"time-MAX_NUM_FORMS": 50,
})
self.assertEqual(self.wprofile.ratings.count(), 2)
class TestInteractForms(TestCase):
"""
    Tests the functionality of the interaction forms for marking shifts as
    blown, verifying shifts, and signing in and out of shifts at appropriate times.
"""
def setUp(self):
self.wu = User.objects.create_user(username="wu", password="<PASSWORD>")
self.u = User.objects.create_user(username="u", password="<PASSWORD>")
self.ou = User.objects.create_user(username="ou", password="<PASSWORD>")
self.wm = Manager.objects.create(
title="Workshift Manager",
incumbent=UserProfile.objects.get(user=self.wu),
workshift_manager=True,
)
today = localtime(now()).date()
self.sem = Semester.objects.create(
year=today.year, start_date=today,
end_date=today + timedelta(days=7),
)
self.pool = WorkshiftPool.objects.get(
semester=self.sem,
)
self.pool.any_blown = True
self.pool.save()
self.wp = WorkshiftProfile.objects.get(user=self.wu)
self.up = WorkshiftProfile.objects.get(user=self.u)
self.op = WorkshiftProfile.objects.get(user=self.ou)
self.wtype = WorkshiftType.objects.create(
title="Test Posts",
description="Test WorkshiftType Description",
quick_tips="Test Quick Tips",
)
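        # The shift runs from now until just before midnight so it is open while
        # the interaction forms are exercised.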
self.shift = RegularWorkshift.objects.create(
workshift_type=self.wtype,
pool=self.pool,
day=DAY_CHOICES[0][0],
start_time=localtime(now()),
end_time=time(23, 59, 59),
)
self.instance = WorkshiftInstance.objects.create(
weekly_workshift=self.shift,
date=today,
workshifter=self.up,
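            # OTHER_VERIFY: a member other than the workshifter must verify the shift.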
verify=OTHER_VERIFY,
)
info = InstanceInfo.objects.create(
title="Test One Time Shift",
pool=self.pool,
)
self.once = WorkshiftInstance.objects.create(
info=info,
date=today,
)
self.sle0 = ShiftLogEntry.objects.create(
person=self.wp,
entry_type=ShiftLogEntry.ASSIGNED,
)
self.instance.logs = [self.sle0]
self.once.logs = [self.sle0]
self.instance.save()
self.once.save()
def test_verify(self):
self.assertTrue(self.client.login(username="u", password="<PASSWORD>"))
form = VerifyShiftForm({"pk": self.instance.pk}, profile=self.wp)
form.is_valid()
self.assertTrue(form.is_valid())
self.assertIsInstance(form.save(), WorkshiftInstance)
log = self.instance.logs.filter(entry_type=ShiftLogEntry.VERIFY)
self.assertEqual(1, log.count())
self.assertEqual(log[0].person, self.wp)
form = VerifyShiftForm({"pk": self.once.pk}, profile=self.wp)
self.assertFalse(form.is_valid())
self.assertIn("Workshift is not filled.", form.errors["pk"])
def test_no_self_verify(self):
self.pool.save()
self.assertTrue(self.client.login(username="u", password="<PASSWORD>"))
form = VerifyShiftForm({"pk": self.instance.pk}, profile=self.up)
self.assertFalse(form.is_valid())
self.assertIn("Workshifter cannot verify self.", form.errors["pk"])
self.assertTrue(self.client.login(username="ou", password="<PASSWORD>"))
form = VerifyShiftForm({"pk": self.instance.pk}, profile=self.op)
form.is_valid()
self.assertTrue(form.is_valid())
self.assertIsInstance(form.save(), WorkshiftInstance)
log = self.instance.logs.filter(entry_type=ShiftLogEntry.VERIFY)
self.assertEqual(1, log.count())
self.assertEqual(log[0].person, self.op)
def test_blown(self):
self.assertTrue(self.client.login(username="ou", password="<PASSWORD>"))
form = BlownShiftForm({"pk": self.instance.pk}, profile=self.op)
self.assertTrue(form.is_valid())
self.assertIsInstance(form.save(), WorkshiftInstance)
log = self.instance.logs.filter(entry_type=ShiftLogEntry.BLOWN)
self.assertEqual(1, log.count())
self.assertEqual(log[0].person, self.op)
form = BlownShiftForm({"pk": self.once.pk}, profile=self.op)
self.assertFalse(form.is_valid())
self.assertIn("Workshift is not filled.", form.errors["pk"])
def test_manager_blown(self):
self.pool.any_blown = False
self.pool.save()
self.assertTrue(self.client.login(username="ou", password="<PASSWORD>"))
form = BlownShiftForm({"pk": self.instance.pk}, profile=self.op)
self.assertFalse(form.is_valid())
self.assertIn("You are not a workshift manager.", form.errors["pk"])
self.client.logout()
self.assertTrue(self.client.login(username="wu", password="<PASSWORD>"))
form = BlownShiftForm({"pk": self.instance.pk}, profile=self.wp)
self.assertTrue(form.is_valid())
self.assertIsInstance(form.save(), WorkshiftInstance)
log = self.instance.logs.filter(entry_type=ShiftLogEntry.BLOWN)
self.assertEqual(1, log.count())
self.assertEqual(log[0].person, self.wp)
def test_sign_in(self):
self.assertTrue(self.client.login(username="u", password="<PASSWORD>"))
form = SignInForm({"pk": self.once.pk}, profile=self.up)
self.assertTrue(form.is_valid())
self.assertIsInstance(form.save(), WorkshiftInstance)
log = self.once.logs.filter(entry_type=ShiftLogEntry.SIGNIN)
self.assertEqual(1, log.count())
self.assertEqual(log[0].person, self.up)
form = SignInForm({"pk": self.instance.pk}, profile=self.up)
self.assertFalse(form.is_valid())
self.assertIn("Workshift is currently filled.", form.errors["pk"])
def test_sign_out(self):
self.assertTrue(self.client.login(username="u", password="<PASSWORD>"))
form = SignOutForm({"pk": self.instance.pk}, profile=self.up)
self.assertTrue(form.is_valid())
self.assertIsInstance(form.save(), WorkshiftInstance)
log = self.instance.logs.filter(entry_type=ShiftLogEntry.SIGNOUT)
self.assertEqual(1, log.count())
self.assertEqual(log[0].person, self.up)
form = SignOutForm({"pk": self.once.pk}, profile=self.up)
self.assertFalse(form.is_valid())
self.assertEqual(["Not signed into workshift."], form.errors["pk"])
def test_missing_shift(self):
self.assertTrue(self.client.login(username="u", password="<PASSWORD>"))
form = SignOutForm({"pk": -1}, profile=self.up)
self.assertFalse(form.is_valid())
form = SignOutForm({"pk": 100}, profile=self.up)
self.assertFalse(form.is_valid())
form = SignOutForm({"pk": "a"}, profile=self.up)
self.assertFalse(form.is_valid())
def test_closed_shift(self):
self.once.closed = True
self.once.save()
form = SignOutForm({"pk": self.once.pk}, profile=self.up)
self.assertFalse(form.is_valid())
class TestPermissions(TestCase):
"""
    Tests that different levels of users and managers can only access
    the pages they are expected to have permission to view.
"""
def setUp(self):
self.wu = User.objects.create_user(username="wu", password="<PASSWORD>")
self.mu = User.objects.create_user(username="mu", password="<PASSWORD>")
self.u = User.objects.create_user(username="u", password="<PASSWORD>")
self.ou = User.objects.create_user(username="ou", password="<PASSWORD>")
self.wm = Manager.objects.create(
title="Workshift Manager",
incumbent=UserProfile.objects.get(user=self.wu),
workshift_manager=True,
)
self.mm = Manager.objects.create(
title="Maintenance Manager",
incumbent=UserProfile.objects.get(user=self.mu),
)
moment = localtime(now())
today = moment.date()
self.sem = Semester.objects.create(
year=today.year, start_date=today,
end_date=today + timedelta(days=7),
)
self.pool = WorkshiftPool.objects.get(
semester=self.sem,
)
self.hi_pool = WorkshiftPool.objects.create(
semester=self.sem,
title="HI Hours",
hours=4,
weeks_per_period=0,
)
self.wp = WorkshiftProfile.objects.get(user=self.wu)
self.mp = WorkshiftProfile.objects.get(user=self.mu)
self.up = WorkshiftProfile.objects.get(user=self.u)
self.op = WorkshiftProfile.objects.get(user=self.ou)
self.wtype = WorkshiftType.objects.create(
title="Test Posts",
)
self.mtype = WorkshiftType.objects.create(
title="Maintenance Cleaning",
)
self.wshift = RegularWorkshift.objects.create(
workshift_type=self.wtype,
pool=self.pool,
day=DAY_CHOICES[0][0],
start_time=moment,
end_time=moment + timedelta(hours=2),
)
self.mshift = RegularWorkshift.objects.create(
workshift_type=self.mtype,
pool=self.hi_pool,
day=DAY_CHOICES[0][0],
start_time=moment,
end_time=moment + timedelta(hours=2),
)
self.winstance = WorkshiftInstance.objects.create(
weekly_workshift=self.wshift,
date=today,
workshifter=self.up,
)
self.minstance = WorkshiftInstance.objects.create(
weekly_workshift=self.mshift,
date=today,
workshifter=self.up,
)
info = InstanceInfo.objects.create(
title="Clean The Deck",
pool=self.pool,
description="Make sure to sing sailor tunes.",
)
self.wonce = WorkshiftInstance.objects.create(
info=info,
date=today,
workshifter=self.up,
)
info = InstanceInfo.objects.create(
title="Build A Deck",
pool=self.hi_pool,
description="Preferably in the shape of a pirate ship.",
)
self.monce = WorkshiftInstance.objects.create(
info=info,
date=today,
workshifter=self.up,
)
def test_workshift_manager(self):
self.assertTrue(self.client.login(username="wu", password="<PASSWORD>"))
urls = [
reverse("workshift:start_semester"),
reverse("workshift:view_semester"),
reverse("workshift:semester_info"),
reverse("workshift:profile", kwargs={"targetUsername": self.up.user.username}),
reverse("workshift:edit_profile", kwargs={"targetUsername": self.up.user.username}),
reverse("workshift:preferences", kwargs={"targetUsername": self.up.user.username}),
reverse("workshift:manage"),
reverse("workshift:fill_shifts"),
reverse("workshift:assign_shifts"),
reverse("workshift:adjust_hours"),
reverse("workshift:add_workshifter"),
reverse("workshift:fine_date"),
reverse("workshift:add_shift"),
reverse("workshift:edit_shift", kwargs={"pk": self.wshift.pk}),
reverse("workshift:edit_instance", kwargs={"pk": self.winstance.pk}),
reverse("workshift:edit_type", kwargs={"pk": self.wtype.pk}),
reverse("workshift:edit_shift", kwargs={"pk": self.mshift.pk}),
reverse("workshift:edit_instance", kwargs={"pk": self.minstance.pk}),
reverse("workshift:edit_type", kwargs={"pk": self.mtype.pk}),
]
for url in urls:
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
def test_maintenance_manager(self):
self.assertTrue(self.client.login(username="mu", password="<PASSWORD>"))
urls = [
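            # Each entry is (allowed, url); pages that are not allowed should
            # redirect to the semester view with an admins-only message.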
(False, reverse("workshift:start_semester")),
(True, reverse("workshift:view_semester")),
(True, reverse("workshift:semester_info")),
(True, reverse("workshift:profile", kwargs={"targetUsername": self.up.user.username})),
(False, reverse("workshift:preferences", kwargs={"targetUsername": self.up.user.username})),
(True, reverse("workshift:manage")),
(True, reverse("workshift:fill_shifts")),
(True, reverse("workshift:assign_shifts")),
(False, reverse("workshift:adjust_hours")),
(False, reverse("workshift:add_workshifter")),
(True, reverse("workshift:add_shift")),
(True, reverse("workshift:fine_date")),
(False, reverse("workshift:edit_shift", kwargs={"pk": self.wshift.pk})),
(False, reverse("workshift:edit_instance", kwargs={"pk": self.winstance.pk})),
(False, reverse("workshift:edit_type", kwargs={"pk": self.wtype.pk})),
(True, reverse("workshift:edit_shift", kwargs={"pk": self.mshift.pk})),
(True, reverse("workshift:edit_instance", kwargs={"pk": self.minstance.pk})),
(True, reverse("workshift:edit_type", kwargs={"pk": self.mtype.pk})),
]
for okay, url in urls:
response = self.client.get(url, follow=True)
if okay:
self.assertEqual(response.status_code, 200)
else:
self.assertRedirects(response, reverse("workshift:view_semester"))
self.assertContains(response, MESSAGES["ADMINS_ONLY"])
def test_user(self):
self.assertTrue(self.client.login(username="u", password="<PASSWORD>"))
urls = [
(False, reverse("workshift:start_semester")),
(True, reverse("workshift:view_semester")),
(True, reverse("workshift:semester_info")),
(True, reverse("workshift:profile", kwargs={"targetUsername": self.up.user.username})),
(False, reverse("workshift:edit_profile", kwargs={"targetUsername": self.up.user.username})),
(True, reverse("workshift:preferences", kwargs={"targetUsername": self.up.user.username})),
(False, reverse("workshift:manage")),
(False, reverse("workshift:fill_shifts")),
(False, reverse("workshift:assign_shifts")),
(False, reverse("workshift:adjust_hours")),
(False, reverse("workshift:add_workshifter")),
(False, reverse("workshift:add_shift")),
(False, reverse("workshift:fine_date")),
(False, reverse("workshift:edit_shift", kwargs={"pk": self.wshift.pk})),
(False, reverse("workshift:edit_instance", kwargs={"pk": self.winstance.pk})),
(False, reverse("workshift:edit_type", kwargs={"pk": self.wtype.pk})),
(False, reverse("workshift:edit_shift", kwargs={"pk": self.mshift.pk})),
(False, reverse("workshift:edit_instance", kwargs={"pk": self.minstance.pk})),
(False, reverse("workshift:edit_type", kwargs={"pk": self.mtype.pk})),
]
for okay, url in urls:
response = self.client.get(url, follow=True)
if okay:
self.assertEqual(response.status_code, 200)
else:
self.assertRedirects(response, reverse("workshift:view_semester"))
self.assertContains(response, MESSAGES["ADMINS_ONLY"])
def test_other_user(self):
self.assertTrue(self.client.login(username="ou", password="<PASSWORD>"))
urls = [
(False, reverse("workshift:start_semester")),
(True, reverse("workshift:view_semester")),
(True, reverse("workshift:semester_info")),
(True, reverse("workshift:profile", kwargs={"targetUsername": self.up.user.username})),
(False, reverse("workshift:edit_profile", kwargs={"targetUsername": self.up.user.username})),
(False, reverse("workshift:preferences", kwargs={"targetUsername": self.up.user.username})),
(False, reverse("workshift:manage")),
(False, reverse("workshift:fill_shifts")),
(False, reverse("workshift:assign_shifts")),
(False, reverse("workshift:adjust_hours")),
(False, reverse("workshift:add_workshifter")),
(False, reverse("workshift:add_shift")),
(False, reverse("workshift:fine_date")),
(False, reverse("workshift:edit_shift", kwargs={"pk": self.wshift.pk})),
(False, reverse("workshift:edit_instance", kwargs={"pk": self.winstance.pk})),
(False, reverse("workshift:edit_type", kwargs={"pk": self.wtype.pk})),
(False, reverse("workshift:edit_shift", kwargs={"pk": self.mshift.pk})),
(False, reverse("workshift:edit_instance", kwargs={"pk": self.minstance.pk})),
(False, reverse("workshift:edit_type", kwargs={"pk": self.mtype.pk})),
]
for okay, url in urls:
response = self.client.get(url, follow=True)
if okay:
self.assertEqual(response.status_code, 200)
else:
self.assertRedirects(response, reverse("workshift:view_semester"))
self.assertContains(response, MESSAGES["ADMINS_ONLY"])
class TestWorkshifters(TestCase):
"""
Tests that workshift profiles are created correctly for different types of
members.
"""
def setUp(self):
self.ru = User.objects.create_user(username="ru", password="<PASSWORD>")
self.bu = User.objects.create_user(username="bu", password="<PASSWORD>")
self.au = User.objects.create_user(username="au", password="<PASSWORD>")
self.ru.is_superuser = True
self.ru.save()
self.rp = UserProfile.objects.get(user=self.ru)
self.bp = UserProfile.objects.get(user=self.bu)
self.ap = UserProfile.objects.get(user=self.au)
self.rp.status = UserProfile.RESIDENT
self.bp.status = UserProfile.BOARDER
self.ap.status = UserProfile.ALUMNUS
self.rp.save()
self.bp.save()
self.ap.save()
Manager.objects.create(
title="Workshift Manager",
incumbent=UserProfile.objects.get(user=self.ru),
workshift_manager=True,
)
self.assertTrue(self.client.login(username="ru", password="<PASSWORD>"))
url = reverse("workshift:start_semester")
today = localtime(now()).date()
response = self.client.post(url, {
"semester-season": Semester.SUMMER,
"semester-year": today.year,
"semester-rate": 13.30,
"semester-policy": "http://bsc.coop",
"semester-start_date": today,
"semester-end_date": today + timedelta(weeks=18),
}, follow=True)
self.assertRedirects(response, reverse("workshift:manage"))
def test_no_alumni(self):
"""
        Tests that WorkshiftProfiles are created for residents but not for
        boarders or alumni.
"""
self.assertEqual(
1,
WorkshiftProfile.objects.filter(user=self.ru).count(),
)
self.assertEqual(
0,
WorkshiftProfile.objects.filter(user=self.bu).count(),
)
self.assertEqual(
0,
WorkshiftProfile.objects.filter(user=self.au).count(),
)
def test_add_user_resident(self):
"""
Test that adding a resident creates a workshift profile.
"""
pr = ProfileRequest.objects.create(
username="request",
first_name="first",
last_name="last",
email="<EMAIL>",
affiliation=UserProfile.RESIDENT,
password="<PASSWORD>",
)
url = reverse("modify_profile_request", kwargs={"request_pk": pr.pk})
response = self.client.post(url, {
"username": pr.username,
"first_name": pr.first_name,
"last_name": pr.last_name,
"email": pr.email,
"affiliation": pr.affiliation,
"former_houses": "",
"is_active": True,
"add_user": "",
}, follow=True)
self.assertRedirects(response, reverse("manage_profile_requests"))
self.assertContains(
response,
"User {0} was successfully added".format(pr.username),
)
self.assertEqual(
1,
WorkshiftProfile.objects.filter(user__username=pr.username).count(),
)
def test_add_user_boarder(self):
"""
        Test that adding a boarder does not create a workshift profile.
"""
pr = ProfileRequest.objects.create(
username="request",
first_name="first",
last_name="last",
email="<EMAIL>",
affiliation=UserProfile.BOARDER,
password="<PASSWORD>",
)
url = reverse("modify_profile_request", kwargs={"request_pk": pr.pk})
response = self.client.post(url, {
"username": pr.username,
"first_name": pr.first_name,
"last_name": pr.last_name,
"email": pr.email,
"affiliation": pr.affiliation,
"former_houses": "",
"is_active": True,
"add_user": "",
}, follow=True)
self.assertRedirects(response, reverse("manage_profile_requests"))
self.assertContains(
response,
"User {0} was successfully added".format(pr.username),
)
self.assertEqual(
0,
WorkshiftProfile.objects.filter(user__username=pr.username).count(),
)
def test_add_user_alumni(self):
"""
        Test that adding an alumnus does not create a workshift profile.
"""
pr = ProfileRequest.objects.create(
username="request",
first_name="first",
last_name="last",
email="<EMAIL>",
affiliation=UserProfile.ALUMNUS,
password="<PASSWORD>",
)
url = reverse("modify_profile_request", kwargs={"request_pk": pr.pk})
response = self.client.post(url, {
"username": pr.username,
"first_name": pr.first_name,
"last_name": pr.last_name,
"email": pr.email,
"affiliation": pr.affiliation,
"former_houses": "",
"is_active": True,
"add_user": "",
}, follow=True)
self.assertRedirects(response, reverse("manage_profile_requests"))
self.assertContains(
response,
"User {0} was successfully added".format(pr.username),
)
self.assertEqual(
0,
WorkshiftProfile.objects.filter(user__username=pr.username).count(),
)
def test_add_workshifter(self):
url = reverse("workshift:add_workshifter")
response = self.client.post(url, {
"user-{0}-add_profile".format(self.bu.pk): True,
"user-{0}-hours".format(self.bu.pk): 3,
"user-{0}-hours".format(self.au.pk): 3,
})
self.assertRedirects(response, reverse("workshift:manage"))
self.assertEqual(
1,
WorkshiftProfile.objects.filter(user=self.bu).count()
)
self.assertEqual(
0,
WorkshiftProfile.objects.filter(user=self.au).count()
)
profile = WorkshiftProfile.objects.get(user=self.bu)
self.assertEqual(
profile.pool_hours.get(
pool=WorkshiftPool.objects.get(is_primary=True),
).hours,
3,
)
class TestWorkshifts(TestCase):
"""
Tests the pages for adding and modifying workshift types, regular shifts,
and instances of shifts.
"""
def setUp(self):
self.wu = User.objects.create_user(username="wu", password="<PASSWORD>")
self.u = User.objects.create_user(username="u", password="<PASSWORD>")
self.wm = Manager.objects.create(
title="Workshift Manager",
incumbent=UserProfile.objects.get(user=self.wu),
workshift_manager=True,
)
today = localtime(now()).date()
self.sem = Semester.objects.create(
year=2014,
start_date=today,
end_date=today + timedelta(days=6),
)
self.pool = WorkshiftPool.objects.get(
semester=self.sem,
)
self.wp = WorkshiftProfile.objects.get(user=self.wu)
self.up = WorkshiftProfile.objects.get(user=self.u)
self.wtype = WorkshiftType.objects.create(
title="Test Posts",
description="Test Description",
)
self.shift = RegularWorkshift.objects.create(
workshift_type=self.wtype,
pool=self.pool,
day=today.weekday(),
start_time=time(16),
end_time=time(18),
)
self.instance = WorkshiftInstance.objects.get(
weekly_workshift=self.shift,
)
info = InstanceInfo.objects.create(
title="Clean The Deck",
pool=self.pool,
description="Make sure to sing sailor tunes.",
)
self.once = WorkshiftInstance.objects.create(
info=info,
date=today,
workshifter=self.wp,
)
self.client.login(username="wu", password="<PASSWORD>")
def test_add_instance(self):
url = reverse("workshift:add_shift")
response = self.client.post(url, {
"add_instance": "",
"weekly_workshift": self.shift.pk,
"date": date(2014, 5, 27),
"workshifter": self.wp.pk,
"closed": False,
"hours": 2,
}, follow=True)
self.assertRedirects(response, reverse("workshift:manage"))
instance = WorkshiftInstance.objects.get(pk=self.once.pk + 1)
self.assertEqual(self.shift, instance.weekly_workshift)
self.assertEqual(None, instance.info)
self.assertEqual(date(2014, 5, 27), instance.date)
self.assertEqual(self.wp, instance.workshifter)
self.assertEqual(False, instance.closed)
self.assertEqual(2, instance.hours)
self.assertEqual(self.shift.verify, instance.verify)
self.assertEqual(False, instance.week_long)
def test_edit_instance(self):
url = reverse("workshift:edit_instance", kwargs={"pk": self.instance.pk})
response = self.client.post(url, {
"edit": "",
"weekly_workshift": self.instance.weekly_workshift.pk,
"title": self.instance.title,
"description": self.instance.description,
"pool": self.instance.pool.pk,
"start_time": self.instance.start_time.strftime("%I:%M %p"),
"end_time": self.instance.end_time.strftime("%I:%M %p"),
"date": date(2014, 5, 27),
"workshifter": self.wp.pk,
"closed": False,
"verify": self.instance.verify,
"week_long": self.instance.week_long,
}, follow=True)
url = reverse("workshift:view_instance", kwargs={"pk": self.instance.pk})
self.assertRedirects(response, url)
self.assertEqual(1, InstanceInfo.objects.count())
instance = WorkshiftInstance.objects.get(pk=self.instance.pk)
self.assertEqual(self.instance.weekly_workshift, instance.weekly_workshift)
self.assertEqual(self.instance.title, instance.title)
self.assertEqual(self.instance.description, instance.description)
self.assertEqual(self.pool, instance.pool)
self.assertEqual(self.instance.start_time, instance.start_time)
self.assertEqual(self.instance.end_time, instance.end_time)
self.assertEqual(date(2014, 5, 27), instance.date)
self.assertEqual(self.wp, instance.workshifter)
self.assertEqual(False, instance.closed)
self.assertEqual(self.instance.verify, instance.verify)
self.assertEqual(self.instance.week_long, instance.week_long)
def test_edit_instance_full(self):
url = reverse("workshift:edit_instance", kwargs={"pk": self.instance.pk})
response = self.client.post(url, {
"edit": "",
"title": "Edit Instance Title",
"description": "I once was from a long line of workshifts",
"pool": self.instance.pool.pk,
"start_time": "2:00 PM",
"end_time": "4:00 PM",
"date": date(2014, 5, 27),
"workshifter": self.wp.pk,
"closed": False,
"verify": SELF_VERIFY,
"week_long": False,
}, follow=True)
url = reverse("workshift:view_instance", kwargs={"pk": self.instance.pk})
self.assertRedirects(response, url)
self.assertEqual(InstanceInfo.objects.count(), 2)
instance = WorkshiftInstance.objects.get(pk=self.instance.pk)
self.assertEqual(instance.weekly_workshift, None)
self.assertEqual(instance.title, "Edit Instance Title")
self.assertEqual(
"I once was from a long line of workshifts",
instance.description,
)
self.assertEqual(instance.pool, self.pool)
self.assertEqual(instance.start_time, time(14, 0, 0))
self.assertEqual(instance.end_time, time(16, 0, 0))
self.assertEqual(instance.date, date(2014, 5, 27))
self.assertEqual(instance.workshifter, self.wp)
self.assertEqual(instance.closed, False)
self.assertEqual(SELF_VERIFY, instance.verify)
self.assertEqual(instance.week_long, False)
def test_delete_instance(self):
url = reverse("workshift:edit_instance", kwargs={"pk": self.instance.pk})
response = self.client.post(url, {
"delete": "",
}, follow=True)
self.assertRedirects(response, reverse("workshift:manage"))
self.assertEqual(
1,
WorkshiftType.objects.filter(pk=self.wtype.pk).count(),
)
self.assertEqual(
1,
RegularWorkshift.objects.filter(pk=self.shift.pk).count(),
)
self.assertEqual(
0,
WorkshiftInstance.objects.filter(pk=self.instance.pk).count(),
)
self.assertEqual(
1,
WorkshiftInstance.objects.filter(pk=self.once.pk).count(),
)
def test_add_once(self):
url = reverse("workshift:add_shift")
response = self.client.post(url, {
"add_instance": "",
"title": "Add Instance Title",
"description": "Add Instance Description",
"pool": self.pool.pk,
"start_time": "6:00 PM",
"end_time": "8:00 PM",
"date": date(2014, 5, 27),
"workshifter": self.wp.pk,
"closed": False,
"hours": 2,
"verify": WORKSHIFT_MANAGER_VERIFY,
"week_long": False,
}, follow=True)
self.assertRedirects(response, reverse("workshift:manage"))
instance = WorkshiftInstance.objects.get(pk=self.once.pk + 1)
self.assertEqual("Add Instance Title", instance.title)
self.assertEqual("Add Instance Description", instance.description)
self.assertEqual(self.pool, instance.pool)
self.assertEqual(time(18, 0, 0), instance.start_time)
self.assertEqual(time(20, 0, 0), instance.end_time)
self.assertEqual(date(2014, 5, 27), instance.date)
self.assertEqual(self.wp, instance.workshifter)
self.assertEqual(False, instance.closed)
self.assertEqual(2, instance.hours)
self.assertEqual(WORKSHIFT_MANAGER_VERIFY, instance.verify)
self.assertEqual(False, instance.week_long)
def test_edit_once(self):
url = reverse("workshift:edit_instance", kwargs={"pk": self.once.pk})
response = self.client.post(url, {
"edit": "",
"title": "Edit Instance Title",
"description": "I once was from a long line of workshifts",
"pool": self.instance.pool.pk,
"start_time": "2:00 PM",
"end_time": "4:00 PM",
"date": date(2014, 5, 27),
"workshifter": self.wp.pk,
"closed": False,
"verify": OTHER_VERIFY,
"week_long": False,
}, follow=True)
url = reverse("workshift:view_instance", kwargs={"pk": self.once.pk})
self.assertRedirects(response, url)
self.assertEqual(1, InstanceInfo.objects.count())
instance = WorkshiftInstance.objects.get(pk=self.once.pk)
self.assertEqual(None, instance.weekly_workshift)
self.assertEqual("Edit Instance Title", instance.title)
self.assertEqual(
"I once was from a long line of workshifts",
instance.description,
)
self.assertEqual(self.pool, instance.pool)
self.assertEqual(time(14, 0, 0), instance.start_time)
self.assertEqual(time(16, 0, 0), instance.end_time)
self.assertEqual(date(2014, 5, 27), instance.date)
self.assertEqual(self.wp, instance.workshifter)
self.assertEqual(False, instance.closed)
self.assertEqual(OTHER_VERIFY, instance.verify)
self.assertEqual(False, instance.week_long)
def test_delete_once(self):
url = reverse("workshift:edit_instance", kwargs={"pk": self.once.pk})
response = self.client.post(url, {
"delete": "",
}, follow=True)
self.assertRedirects(response, reverse("workshift:manage"))
self.assertEqual(
1,
WorkshiftType.objects.filter(pk=self.wtype.pk).count(),
)
self.assertEqual(
1,
RegularWorkshift.objects.filter(pk=self.shift.pk).count(),
)
self.assertEqual(
WorkshiftInstance.objects.filter(pk=self.instance.pk).count(),
1,
)
self.assertEqual(
0,
WorkshiftInstance.objects.filter(pk=self.once.pk).count(),
)
def test_edit_shift(self):
url = reverse("workshift:edit_shift", kwargs={"pk": self.shift.pk})
response = self.client.post(url, {
"edit": "",
"workshift_type": self.wtype.pk,
"pool": self.pool.pk,
"hours": 42,
"count": 4,
"day": DAY_CHOICES[0][0],
"active": True,
"current_assignees": [self.up.pk],
"start_time": "4:00 PM",
"end_time": "6:00 PM",
"verify": AUTO_VERIFY,
"addendum": "Edited addendum",
}, follow=True)
url = reverse("workshift:view_shift", kwargs={"pk": self.shift.pk})
self.assertRedirects(response, url)
shift = RegularWorkshift.objects.get(pk=self.shift.pk)
self.assertEqual(self.wtype, shift.workshift_type)
self.assertEqual(self.pool, shift.pool)
self.assertEqual(shift.hours, 42)
self.assertEqual(4, shift.count)
self.assertEqual(True, shift.active)
self.assertEqual(
[self.up],
list(shift.current_assignees.all()),
)
instances = WorkshiftInstance.objects.filter(weekly_workshift=shift)
self.assertGreater(instances.count(), 0)
self.assertTrue(all(i.workshifter == self.up for i in instances))
self.assertEqual(time(16), shift.start_time)
self.assertEqual(time(18), shift.end_time)
self.assertEqual(AUTO_VERIFY, shift.verify)
self.assertEqual(DAY_CHOICES[0][0], shift.day)
self.assertEqual(False, shift.week_long)
self.assertEqual("Edited addendum", shift.addendum)
def test_delete_shift(self):
url = reverse("workshift:edit_shift", kwargs={"pk": self.shift.pk})
response = self.client.post(url, {
"delete": "",
}, follow=True)
self.assertRedirects(response, reverse("workshift:manage"))
self.assertEqual(
1,
WorkshiftType.objects.filter(pk=self.wtype.pk).count(),
)
self.assertEqual(
0,
RegularWorkshift.objects.filter(pk=self.shift.pk).count(),
)
self.assertEqual(
0,
WorkshiftInstance.objects.filter(pk=self.instance.pk).count(),
)
self.assertEqual(
1,
WorkshiftInstance.objects.filter(pk=self.once.pk).count(),
)
def test_add_type(self):
url = reverse("workshift:add_shift")
response = self.client.post(url, {
"add_type": "",
"type-title": "Added Title",
"type-description": "Added Description",
"type-quick_tips": "Added Quick Tips",
"type-rateable": True,
"type-assignment": WorkshiftType.AUTO_ASSIGN,
"shifts-TOTAL_FORMS": 0,
"shifts-INITIAL_FORMS": 0,
"shifts-MAX_NUM_FORMS": 50,
}, follow=True)
self.assertRedirects(response, reverse("workshift:manage"))
shift_type = WorkshiftType.objects.get(title="Added Title")
self.assertEqual(shift_type.title, "Added Title")
self.assertEqual(shift_type.description, "Added Description")
self.assertEqual(shift_type.quick_tips, "Added Quick Tips")
self.assertEqual(shift_type.rateable, True)
self.assertEqual(shift_type.assignment, WorkshiftType.AUTO_ASSIGN)
def test_edit_type(self):
url = reverse("workshift:edit_type", kwargs={"pk": self.wtype.pk})
response = self.client.post(url, {
"edit": "",
"edit-title": "Edited Title",
"edit-description": "Edited Description",
"edit-quick_tips": "Edited Quick Tips",
"edit-rateable": False,
"edit-assignment": WorkshiftType.MANUAL_ASSIGN,
"shifts-TOTAL_FORMS": 0,
"shifts-INITIAL_FORMS": 0,
"shifts-MAX_NUM_FORMS": 50,
}, follow=True)
url = reverse("workshift:view_type", kwargs={"pk": self.wtype.pk})
self.assertRedirects(response, url)
shift_type = WorkshiftType.objects.get(pk=self.wtype.pk)
self.assertEqual(shift_type.title, "Edited Title")
self.assertEqual(shift_type.description, "Edited Description")
self.assertEqual(shift_type.quick_tips, "Edited Quick Tips")
self.assertEqual(shift_type.rateable, False)
self.assertEqual(shift_type.assignment, WorkshiftType.MANUAL_ASSIGN)
class TestSemester(TestCase):
"""
    Tests for correct behavior when multiple semesters exist, including when
    more than one semester is marked "current".
"""
def setUp(self):
self.wu = User.objects.create_user(username="wu", password="<PASSWORD>")
self.wu.first_name = "Workshift"
self.wu.last_name = "User"
self.wu.save()
self.wp = UserProfile.objects.get(user=self.wu)
self.wm = Manager.objects.create(
title="Workshift Manager",
incumbent=self.wp,
workshift_manager=True,
)
today = localtime(now()).date()
last_year = today - timedelta(days=365)
self.s2 = Semester.objects.create(
year=last_year.year,
start_date=last_year,
end_date=last_year + timedelta(days=7),
)
self.s1 = Semester.objects.create(
year=today.year, start_date=today,
end_date=today + timedelta(days=7),
)
self.assertEqual(
RegularWorkshift.objects.count(),
2,
)
self.wprofile = WorkshiftProfile.objects.get(user=self.wu, semester=self.s1)
self.client.login(username="wu", password="<PASSWORD>")
def test_fill_shifts(self):
url = reverse("workshift:fill_shifts")
response = self.client.post(url, {
}, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(
RegularWorkshift.objects.count(),
2, # 2x Workshift Manager
)
names = [
"fill_regular_shifts",
"fill_humor_shifts",
"fill_bathroom_shifts",
"fill_social_shifts",
"fill_HI_shifts",
]
for name in names:
response = self.client.post(url, {
name: "",
}, follow=True)
self.assertRedirects(response, reverse("workshift:fill_shifts"))
# Check we created the correct number of shifts (no duplicates across semesters)
self.assertEqual(
RegularWorkshift.objects.count(),
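            # Every shift defined in the pre-fill data, plus one Workshift
            # Manager shift per semester (both s1 and s2 exist).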
sum(len(i[2]) for i in REGULAR_WORKSHIFTS) + len(WEEK_LONG) +
sum(len(i[2]) for i in HUMOR_WORKSHIFTS) +
sum(len(i[2]) for i in BATHROOM_WORKSHIFTS) +
Manager.objects.count() * 2,
)
response = self.client.post(url, {
"reset_all_shifts": "",
}, follow=True)
self.assertRedirects(response, reverse("workshift:fill_shifts"))
self.assertEqual(
RegularWorkshift.objects.count(),
2, # 2x Workshift Manager
)
def test_clear_semester(self):
self.s1.delete()
self.assertEqual(
Semester.objects.count(),
1,
)
self.s2.delete()
self.assertEqual(
Semester.objects.count(),
0,
)
def test_new_semester(self):
url = reverse("workshift:start_semester")
today = localtime(now()).date()
response = self.client.post(url, {
"semester-year": today.year,
"semester-season": Semester.FALL,
"semester-rate": 14.00,
"semester-policy": "http://bsc.coop",
"semester-start_date": today,
"semester-end_date": today + timedelta(weeks=18) - timedelta(days=today.weekday()),
}, follow=True)
self.assertRedirects(response, reverse("workshift:manage"))
self.assertEqual(
Semester.objects.filter(current=True).count(),
1,
)
s = Semester.objects.get(current=True)
self.assertEqual(
s.year,
today.year,
)
self.assertEqual(
s.season,
Semester.FALL,
)
self.assertEqual(
WorkshiftProfile.objects.filter(semester=s).count(),
1,
)
self.assertEqual(
RegularWorkshift.objects.filter(pool__semester=s).count(),
1,
)
self.assertEqual(
WorkshiftProfile.objects.filter(user=self.wu, semester=s).count(),
1,
)
shift = RegularWorkshift.objects.get(pool__semester=s)
self.assertEqual(
[i.pk for i in shift.current_assignees.all()],
[i.pk for i in WorkshiftProfile.objects.filter(user=self.wu, semester=s)],
)
self.assertEqual(
WorkshiftInstance.objects.filter(weekly_workshift=shift).count(),
18, # 18 instances of Workshift Manager shift
)
def test_no_current(self):
self.s1.current = False
self.s1.save()
url = reverse("workshift:view_semester")
response = self.client.get(url, follow=True)
self.assertRedirects(response, reverse("workshift:start_semester"))
def test_multiple_current(self):
self.s2.current = True
self.s2.save()
workshift_emails_str = ""
url = reverse("workshift:view_semester")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
MESSAGES['MULTIPLE_CURRENT_SEMESTERS'].format(
admin_email=settings.ADMINS[0][1],
workshift_emails=workshift_emails_str,
))
def test_multiple_current_workshift_email(self):
self.s2.current = True
self.s2.save()
self.wm.email = "<EMAIL>"
self.wm.save()
workshift_emails_str = ' (<a href="mailto:{0}">{0}</a>)'.format(self.wm.email)
url = reverse("workshift:view_semester")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
MESSAGES['MULTIPLE_CURRENT_SEMESTERS'].format(
admin_email=settings.ADMINS[0][1],
workshift_emails=workshift_emails_str,
))
def test_multiple_current_user_email(self):
self.s2.current = True
self.s2.save()
self.wu.email = "<EMAIL>"
self.wp.email_visible = True
self.wu.save()
self.wp.save()
workshift_emails_str = ' (<a href="mailto:{0}">{0}</a>)'.format(self.wu.email)
url = reverse("workshift:view_semester")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
MESSAGES['MULTIPLE_CURRENT_SEMESTERS'].format(
admin_email=settings.ADMINS[0][1],
workshift_emails=workshift_emails_str,
))
| [
"workshift.utils.collect_blown",
"django.core.urlresolvers.reverse",
"workshift.cron.CollectBlownCronJob",
"base.models.ProfileRequest.objects.create",
"datetime.timedelta",
"workshift.utils.auto_assign_shifts",
"datetime.time",
"managers.models.Manager.objects.all",
"django.utils.timezone.now",
"datetime.date",
"workshift.utils.get_year_season",
"managers.models.Manager.objects.create",
"base.models.User.objects.create_user",
"workshift.cron.UpdateWeeklyStandings",
"workshift.utils.make_workshift_pool_hours",
"workshift.utils.get_semester_start_end",
"base.models.UserProfile.objects.get",
"managers.models.Manager.objects.count",
"workshift.utils.make_instances",
"farnsworth.pre_fill.main"
] | [((953, 1014), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""u"""', 'password': '"""<PASSWORD>"""'}), "(username='u', password='<PASSWORD>')\n", (977, 1014), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((1033, 1095), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""wu"""', 'password': '"""<PASSWORD>"""'}), "(username='wu', password='<PASSWORD>')\n", (1057, 1095), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((1545, 1579), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_semester"""'], {}), "('workshift:view_semester')\n", (1552, 1579), False, 'from django.core.urlresolvers import reverse\n'), ((1749, 1783), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_semester"""'], {}), "('workshift:view_semester')\n", (1756, 1783), False, 'from django.core.urlresolvers import reverse\n'), ((2171, 2206), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:start_semester"""'], {}), "('workshift:start_semester')\n", (2178, 2206), False, 'from django.core.urlresolvers import reverse\n'), ((3793, 3832), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""u0"""'}), "(username='u0')\n", (3817, 3832), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((5259, 5314), 'workshift.utils.make_workshift_pool_hours', 'utils.make_workshift_pool_hours', ([], {'semester': 'self.semester'}), '(semester=self.semester)\n', (5290, 5314), False, 'from workshift import utils, signals\n'), ((5582, 5621), 'workshift.utils.auto_assign_shifts', 'utils.auto_assign_shifts', (['self.semester'], {}), '(self.semester)\n', (5606, 5621), False, 'from workshift import utils, signals\n'), ((6689, 6728), 'workshift.utils.auto_assign_shifts', 'utils.auto_assign_shifts', (['self.semester'], {}), '(self.semester)\n', (6713, 6728), False, 'from workshift import utils, signals\n'), ((7350, 7389), 'workshift.utils.auto_assign_shifts', 'utils.auto_assign_shifts', (['self.semester'], {}), '(self.semester)\n', (7374, 7389), False, 'from workshift import utils, signals\n'), ((8331, 8370), 'workshift.utils.auto_assign_shifts', 'utils.auto_assign_shifts', (['self.semester'], {}), '(self.semester)\n', (8355, 8370), False, 'from workshift import utils, signals\n'), ((9466, 9505), 'workshift.utils.auto_assign_shifts', 'utils.auto_assign_shifts', (['self.semester'], {}), '(self.semester)\n', (9490, 9505), False, 'from workshift import utils, signals\n'), ((10878, 10917), 'workshift.utils.auto_assign_shifts', 'utils.auto_assign_shifts', (['self.semester'], {}), '(self.semester)\n', (10902, 10917), False, 'from workshift import utils, signals\n'), ((12439, 12494), 'workshift.utils.make_workshift_pool_hours', 'utils.make_workshift_pool_hours', ([], {'semester': 'self.semester'}), '(semester=self.semester)\n', (12470, 12494), False, 'from workshift import utils, signals\n'), ((12516, 12555), 'workshift.utils.auto_assign_shifts', 'utils.auto_assign_shifts', (['self.semester'], {}), '(self.semester)\n', (12540, 12555), False, 'from workshift import utils, signals\n'), ((13371, 13426), 'workshift.utils.make_workshift_pool_hours', 'utils.make_workshift_pool_hours', ([], {'semester': 'self.semester'}), '(semester=self.semester)\n', (13402, 13426), False, 'from workshift import utils, signals\n'), ((13448, 13487), 'workshift.utils.auto_assign_shifts', 'utils.auto_assign_shifts', (['self.semester'], {}), 
'(self.semester)\n', (13472, 13487), False, 'from workshift import utils, signals\n'), ((14053, 14097), 'farnsworth.pre_fill.main', 'pre_fill.main', (["['--managers', '--workshift']"], {}), "(['--managers', '--workshift'])\n", (14066, 14097), False, 'from farnsworth import pre_fill\n'), ((14106, 14161), 'workshift.utils.make_workshift_pool_hours', 'utils.make_workshift_pool_hours', ([], {'semester': 'self.semester'}), '(semester=self.semester)\n', (14137, 14161), False, 'from workshift import utils, signals\n'), ((14384, 14423), 'workshift.utils.auto_assign_shifts', 'utils.auto_assign_shifts', (['self.semester'], {}), '(self.semester)\n', (14408, 14423), False, 'from workshift import utils, signals\n'), ((14750, 14794), 'farnsworth.pre_fill.main', 'pre_fill.main', (["['--managers', '--workshift']"], {}), "(['--managers', '--workshift'])\n", (14763, 14794), False, 'from farnsworth import pre_fill\n'), ((14803, 14858), 'workshift.utils.make_workshift_pool_hours', 'utils.make_workshift_pool_hours', ([], {'semester': 'self.semester'}), '(semester=self.semester)\n', (14834, 14858), False, 'from workshift import utils, signals\n'), ((15539, 15608), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""u"""', 'first_name': '"""N"""', 'last_name': '"""M"""'}), "(username='u', first_name='N', last_name='M')\n", (15563, 15608), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((16469, 16492), 'workshift.utils.get_year_season', 'utils.get_year_season', ([], {}), '()\n', (16490, 16492), False, 'from workshift import utils, signals\n'), ((18123, 18156), 'workshift.utils.make_workshift_pool_hours', 'utils.make_workshift_pool_hours', ([], {}), '()\n', (18154, 18156), False, 'from workshift import utils, signals\n'), ((18367, 18435), 'workshift.utils.make_workshift_pool_hours', 'utils.make_workshift_pool_hours', ([], {'semester': 'self.semester', 'profiles': '[]'}), '(semester=self.semester, profiles=[])\n', (18398, 18435), False, 'from workshift import utils, signals\n'), ((18600, 18685), 'workshift.utils.make_workshift_pool_hours', 'utils.make_workshift_pool_hours', ([], {'semester': 'self.semester', 'profiles': '[self.profile]'}), '(semester=self.semester, profiles=[self.profile]\n )\n', (18631, 18685), False, 'from workshift import utils, signals\n'), ((18928, 19000), 'workshift.utils.make_workshift_pool_hours', 'utils.make_workshift_pool_hours', ([], {'semester': 'self.semester', 'pools': '[self.p1]'}), '(semester=self.semester, pools=[self.p1])\n', (18959, 19000), False, 'from workshift import utils, signals\n'), ((19165, 19237), 'workshift.utils.make_workshift_pool_hours', 'utils.make_workshift_pool_hours', ([], {'semester': 'self.semester', 'pools': '[self.p2]'}), '(semester=self.semester, pools=[self.p2])\n', (19196, 19237), False, 'from workshift import utils, signals\n'), ((19487, 19559), 'workshift.utils.make_workshift_pool_hours', 'utils.make_workshift_pool_hours', ([], {'semester': 'self.semester', 'primary_hours': '(6)'}), '(semester=self.semester, primary_hours=6)\n', (19518, 19559), False, 'from workshift import utils, signals\n'), ((20587, 20647), 'workshift.utils.make_instances', 'utils.make_instances', ([], {'semester': 'self.semester', 'shifts': '[shift]'}), '(semester=self.semester, shifts=[shift])\n', (20607, 20647), False, 'from workshift import utils, signals\n'), ((21309, 21342), 'workshift.utils.make_workshift_pool_hours', 'utils.make_workshift_pool_hours', ([], {}), '()\n', (21340, 21342), False, 'from workshift import 
utils, signals\n'), ((24647, 24708), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""u"""', 'password': '"""<PASSWORD>"""'}), "(username='u', password='<PASSWORD>')\n", (24671, 24708), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((24727, 24789), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""wu"""', 'password': '"""<PASSWORD>"""'}), "(username='wu', password='<PASSWORD>')\n", (24751, 24789), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((30513, 30544), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:list_types"""'], {}), "('workshift:list_types')\n", (30520, 30544), False, 'from django.core.urlresolvers import reverse\n'), ((30913, 30973), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_type"""'], {'kwargs': "{'pk': self.wtype.pk}"}), "('workshift:view_type', kwargs={'pk': self.wtype.pk})\n", (30920, 30973), False, 'from django.core.urlresolvers import reverse\n'), ((31290, 31350), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_type"""'], {'kwargs': "{'pk': self.wtype.pk}"}), "('workshift:edit_type', kwargs={'pk': self.wtype.pk})\n", (31297, 31350), False, 'from django.core.urlresolvers import reverse\n'), ((31663, 31724), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_shift"""'], {'kwargs': "{'pk': self.shift.pk}"}), "('workshift:view_shift', kwargs={'pk': self.shift.pk})\n", (31670, 31724), False, 'from django.core.urlresolvers import reverse\n'), ((32266, 32327), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_shift"""'], {'kwargs': "{'pk': self.shift.pk}"}), "('workshift:edit_shift', kwargs={'pk': self.shift.pk})\n", (32273, 32327), False, 'from django.core.urlresolvers import reverse\n'), ((32714, 32781), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_instance"""'], {'kwargs': "{'pk': self.instance.pk}"}), "('workshift:view_instance', kwargs={'pk': self.instance.pk})\n", (32721, 32781), False, 'from django.core.urlresolvers import reverse\n'), ((33593, 33660), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_instance"""'], {'kwargs': "{'pk': self.instance.pk}"}), "('workshift:edit_instance', kwargs={'pk': self.instance.pk})\n", (33600, 33660), False, 'from django.core.urlresolvers import reverse\n'), ((34377, 34440), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_instance"""'], {'kwargs': "{'pk': self.once.pk}"}), "('workshift:view_instance', kwargs={'pk': self.once.pk})\n", (34384, 34440), False, 'from django.core.urlresolvers import reverse\n'), ((35170, 35233), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_instance"""'], {'kwargs': "{'pk': self.once.pk}"}), "('workshift:edit_instance', kwargs={'pk': self.once.pk})\n", (35177, 35233), False, 'from django.core.urlresolvers import reverse\n'), ((35727, 35761), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_semester"""'], {}), "('workshift:view_semester')\n", (35734, 35761), False, 'from django.core.urlresolvers import reverse\n'), ((36033, 36067), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_semester"""'], {}), "('workshift:view_semester')\n", (36040, 36067), False, 'from django.core.urlresolvers import reverse\n'), ((36648, 36682), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_semester"""'], {}), "('workshift:view_semester')\n", (36655, 36682), False, 'from 
django.core.urlresolvers import reverse\n'), ((37138, 37172), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_semester"""'], {}), "('workshift:view_semester')\n", (37145, 37172), False, 'from django.core.urlresolvers import reverse\n'), ((37696, 37730), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:assign_shifts"""'], {}), "('workshift:assign_shifts')\n", (37703, 37730), False, 'from django.core.urlresolvers import reverse\n'), ((38381, 38415), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:assign_shifts"""'], {}), "('workshift:assign_shifts')\n", (38388, 38415), False, 'from django.core.urlresolvers import reverse\n'), ((38781, 38815), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:assign_shifts"""'], {}), "('workshift:assign_shifts')\n", (38788, 38815), False, 'from django.core.urlresolvers import reverse\n'), ((39337, 39367), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:fine_date"""'], {}), "('workshift:fine_date')\n", (39344, 39367), False, 'from django.core.urlresolvers import reverse\n'), ((39808, 39870), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""wu"""', 'password': '"""<PASSWORD>"""'}), "(username='wu', password='<PASSWORD>')\n", (39832, 39870), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((41265, 41358), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:preferences"""'], {'kwargs': "{'targetUsername': self.wprofile.user.username}"}), "('workshift:preferences', kwargs={'targetUsername': self.wprofile.\n user.username})\n", (41272, 41358), False, 'from django.core.urlresolvers import reverse\n'), ((48261, 48323), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""wu"""', 'password': '"""<PASSWORD>"""'}), "(username='wu', password='<PASSWORD>')\n", (48285, 48323), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((48341, 48402), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""u"""', 'password': '"""<PASSWORD>"""'}), "(username='u', password='<PASSWORD>')\n", (48365, 48402), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((48421, 48483), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""ou"""', 'password': '"""<PASSWORD>"""'}), "(username='ou', password='<PASSWORD>')\n", (48445, 48483), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((55323, 55385), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""wu"""', 'password': '"""<PASSWORD>"""'}), "(username='wu', password='<PASSWORD>')\n", (55347, 55385), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((55404, 55466), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""mu"""', 'password': '"""<PASSWORD>"""'}), "(username='mu', password='<PASSWORD>')\n", (55428, 55466), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((55484, 55545), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""u"""', 'password': '"""<PASSWORD>"""'}), "(username='u', password='<PASSWORD>')\n", (55508, 55545), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((55564, 55626), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""ou"""', 'password': '"""<PASSWORD>"""'}), "(username='ou', 
password='<PASSWORD>')\n", (55588, 55626), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((65553, 65615), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""ru"""', 'password': '"""<PASSWORD>"""'}), "(username='ru', password='<PASSWORD>')\n", (65577, 65615), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((65634, 65696), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""bu"""', 'password': '"""<PASSWORD>"""'}), "(username='bu', password='<PASSWORD>')\n", (65658, 65696), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((65715, 65777), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""au"""', 'password': '"""<PASSWORD>"""'}), "(username='au', password='<PASSWORD>')\n", (65739, 65777), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((65857, 65894), 'base.models.UserProfile.objects.get', 'UserProfile.objects.get', ([], {'user': 'self.ru'}), '(user=self.ru)\n', (65880, 65894), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((65913, 65950), 'base.models.UserProfile.objects.get', 'UserProfile.objects.get', ([], {'user': 'self.bu'}), '(user=self.bu)\n', (65936, 65950), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((65969, 66006), 'base.models.UserProfile.objects.get', 'UserProfile.objects.get', ([], {'user': 'self.au'}), '(user=self.au)\n', (65992, 66006), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((66490, 66525), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:start_semester"""'], {}), "('workshift:start_semester')\n", (66497, 66525), False, 'from django.core.urlresolvers import reverse\n'), ((67596, 67765), 'base.models.ProfileRequest.objects.create', 'ProfileRequest.objects.create', ([], {'username': '"""request"""', 'first_name': '"""first"""', 'last_name': '"""last"""', 'email': '"""<EMAIL>"""', 'affiliation': 'UserProfile.RESIDENT', 'password': '"""<PASSWORD>"""'}), "(username='request', first_name='first',\n last_name='last', email='<EMAIL>', affiliation=UserProfile.RESIDENT,\n password='<PASSWORD>')\n", (67625, 67765), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((67856, 67919), 'django.core.urlresolvers.reverse', 'reverse', (['"""modify_profile_request"""'], {'kwargs': "{'request_pk': pr.pk}"}), "('modify_profile_request', kwargs={'request_pk': pr.pk})\n", (67863, 67919), False, 'from django.core.urlresolvers import reverse\n'), ((68746, 68914), 'base.models.ProfileRequest.objects.create', 'ProfileRequest.objects.create', ([], {'username': '"""request"""', 'first_name': '"""first"""', 'last_name': '"""last"""', 'email': '"""<EMAIL>"""', 'affiliation': 'UserProfile.BOARDER', 'password': '"""<PASSWORD>"""'}), "(username='request', first_name='first',\n last_name='last', email='<EMAIL>', affiliation=UserProfile.BOARDER,\n password='<PASSWORD>')\n", (68775, 68914), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((69005, 69068), 'django.core.urlresolvers.reverse', 'reverse', (['"""modify_profile_request"""'], {'kwargs': "{'request_pk': pr.pk}"}), "('modify_profile_request', kwargs={'request_pk': pr.pk})\n", (69012, 69068), False, 'from django.core.urlresolvers import reverse\n'), ((69902, 70070), 'base.models.ProfileRequest.objects.create', 'ProfileRequest.objects.create', ([], {'username': '"""request"""', 'first_name': '"""first"""', 
'last_name': '"""last"""', 'email': '"""<EMAIL>"""', 'affiliation': 'UserProfile.ALUMNUS', 'password': '"""<PASSWORD>"""'}), "(username='request', first_name='first',\n last_name='last', email='<EMAIL>', affiliation=UserProfile.ALUMNUS,\n password='<PASSWORD>')\n", (69931, 70070), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((70161, 70224), 'django.core.urlresolvers.reverse', 'reverse', (['"""modify_profile_request"""'], {'kwargs': "{'request_pk': pr.pk}"}), "('modify_profile_request', kwargs={'request_pk': pr.pk})\n", (70168, 70224), False, 'from django.core.urlresolvers import reverse\n'), ((70963, 70999), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:add_workshifter"""'], {}), "('workshift:add_workshifter')\n", (70970, 70999), False, 'from django.core.urlresolvers import reverse\n'), ((71955, 72017), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""wu"""', 'password': '"""<PASSWORD>"""'}), "(username='wu', password='<PASSWORD>')\n", (71979, 72017), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((72035, 72096), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""u"""', 'password': '"""<PASSWORD>"""'}), "(username='u', password='<PASSWORD>')\n", (72059, 72096), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((73602, 73632), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:add_shift"""'], {}), "('workshift:add_shift')\n", (73609, 73632), False, 'from django.core.urlresolvers import reverse\n'), ((74533, 74600), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_instance"""'], {'kwargs': "{'pk': self.instance.pk}"}), "('workshift:edit_instance', kwargs={'pk': self.instance.pk})\n", (74540, 74600), False, 'from django.core.urlresolvers import reverse\n'), ((75260, 75327), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_instance"""'], {'kwargs': "{'pk': self.instance.pk}"}), "('workshift:view_instance', kwargs={'pk': self.instance.pk})\n", (75267, 75327), False, 'from django.core.urlresolvers import reverse\n'), ((76263, 76330), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_instance"""'], {'kwargs': "{'pk': self.instance.pk}"}), "('workshift:edit_instance', kwargs={'pk': self.instance.pk})\n", (76270, 76330), False, 'from django.core.urlresolvers import reverse\n'), ((76846, 76913), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_instance"""'], {'kwargs': "{'pk': self.instance.pk}"}), "('workshift:view_instance', kwargs={'pk': self.instance.pk})\n", (76853, 76913), False, 'from django.core.urlresolvers import reverse\n'), ((77834, 77901), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_instance"""'], {'kwargs': "{'pk': self.instance.pk}"}), "('workshift:edit_instance', kwargs={'pk': self.instance.pk})\n", (77841, 77901), False, 'from django.core.urlresolvers import reverse\n'), ((78617, 78647), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:add_shift"""'], {}), "('workshift:add_shift')\n", (78624, 78647), False, 'from django.core.urlresolvers import reverse\n'), ((79989, 80052), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_instance"""'], {'kwargs': "{'pk': self.once.pk}"}), "('workshift:edit_instance', kwargs={'pk': self.once.pk})\n", (79996, 80052), False, 'from django.core.urlresolvers import reverse\n'), ((80569, 80632), 'django.core.urlresolvers.reverse', 'reverse', 
(['"""workshift:view_instance"""'], {'kwargs': "{'pk': self.once.pk}"}), "('workshift:view_instance', kwargs={'pk': self.once.pk})\n", (80576, 80632), False, 'from django.core.urlresolvers import reverse\n'), ((81546, 81609), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_instance"""'], {'kwargs': "{'pk': self.once.pk}"}), "('workshift:edit_instance', kwargs={'pk': self.once.pk})\n", (81553, 81609), False, 'from django.core.urlresolvers import reverse\n'), ((82327, 82388), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_shift"""'], {'kwargs': "{'pk': self.shift.pk}"}), "('workshift:edit_shift', kwargs={'pk': self.shift.pk})\n", (82334, 82388), False, 'from django.core.urlresolvers import reverse\n'), ((82886, 82947), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_shift"""'], {'kwargs': "{'pk': self.shift.pk}"}), "('workshift:view_shift', kwargs={'pk': self.shift.pk})\n", (82893, 82947), False, 'from django.core.urlresolvers import reverse\n'), ((83967, 84028), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_shift"""'], {'kwargs': "{'pk': self.shift.pk}"}), "('workshift:edit_shift', kwargs={'pk': self.shift.pk})\n", (83974, 84028), False, 'from django.core.urlresolvers import reverse\n'), ((84744, 84774), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:add_shift"""'], {}), "('workshift:add_shift')\n", (84751, 84774), False, 'from django.core.urlresolvers import reverse\n'), ((85732, 85792), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_type"""'], {'kwargs': "{'pk': self.wtype.pk}"}), "('workshift:edit_type', kwargs={'pk': self.wtype.pk})\n", (85739, 85792), False, 'from django.core.urlresolvers import reverse\n'), ((86263, 86323), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_type"""'], {'kwargs': "{'pk': self.wtype.pk}"}), "('workshift:view_type', kwargs={'pk': self.wtype.pk})\n", (86270, 86323), False, 'from django.core.urlresolvers import reverse\n'), ((86971, 87033), 'base.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""wu"""', 'password': '"""<PASSWORD>"""'}), "(username='wu', password='<PASSWORD>')\n", (86995, 87033), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((87151, 87188), 'base.models.UserProfile.objects.get', 'UserProfile.objects.get', ([], {'user': 'self.wu'}), '(user=self.wu)\n', (87174, 87188), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((87208, 87304), 'managers.models.Manager.objects.create', 'Manager.objects.create', ([], {'title': '"""Workshift Manager"""', 'incumbent': 'self.wp', 'workshift_manager': '(True)'}), "(title='Workshift Manager', incumbent=self.wp,\n workshift_manager=True)\n", (87230, 87304), False, 'from managers.models import Manager\n'), ((88054, 88086), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:fill_shifts"""'], {}), "('workshift:fill_shifts')\n", (88061, 88086), False, 'from django.core.urlresolvers import reverse\n'), ((89743, 89778), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:start_semester"""'], {}), "('workshift:start_semester')\n", (89750, 89778), False, 'from django.core.urlresolvers import reverse\n'), ((91475, 91509), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_semester"""'], {}), "('workshift:view_semester')\n", (91482, 91509), False, 'from django.core.urlresolvers import reverse\n'), ((91781, 91815), 'django.core.urlresolvers.reverse', 'reverse', 
(['"""workshift:view_semester"""'], {}), "('workshift:view_semester')\n", (91788, 91815), False, 'from django.core.urlresolvers import reverse\n'), ((92412, 92446), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_semester"""'], {}), "('workshift:view_semester')\n", (92419, 92446), False, 'from django.core.urlresolvers import reverse\n'), ((93098, 93132), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_semester"""'], {}), "('workshift:view_semester')\n", (93105, 93132), False, 'from django.core.urlresolvers import reverse\n'), ((1876, 1911), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:start_semester"""'], {}), "('workshift:start_semester')\n", (1883, 1911), False, 'from django.core.urlresolvers import reverse\n'), ((2109, 2128), 'django.core.urlresolvers.reverse', 'reverse', (['"""homepage"""'], {}), "('homepage')\n", (2116, 2128), False, 'from django.core.urlresolvers import reverse\n'), ((2589, 2616), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:manage"""'], {}), "('workshift:manage')\n", (2596, 2616), False, 'from django.core.urlresolvers import reverse\n'), ((14245, 14266), 'managers.models.Manager.objects.all', 'Manager.objects.all', ([], {}), '()\n', (14264, 14266), False, 'from managers.models import Manager\n'), ((14301, 14335), 'base.models.UserProfile.objects.get', 'UserProfile.objects.get', ([], {'user': 'user'}), '(user=user)\n', (14324, 14335), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((17653, 17704), 'workshift.utils.get_semester_start_end', 'utils.get_semester_start_end', (['(2014)', 'Semester.SPRING'], {}), '(2014, Semester.SPRING)\n', (17681, 17704), False, 'from workshift import utils, signals\n'), ((17810, 17861), 'workshift.utils.get_semester_start_end', 'utils.get_semester_start_end', (['(2014)', 'Semester.SUMMER'], {}), '(2014, Semester.SUMMER)\n', (17838, 17861), False, 'from workshift import utils, signals\n'), ((17968, 18017), 'workshift.utils.get_semester_start_end', 'utils.get_semester_start_end', (['(2014)', 'Semester.FALL'], {}), '(2014, Semester.FALL)\n', (17996, 18017), False, 'from workshift import utils, signals\n'), ((21407, 21428), 'workshift.utils.collect_blown', 'utils.collect_blown', ([], {}), '()\n', (21426, 21428), False, 'from workshift import utils, signals\n'), ((21509, 21552), 'workshift.utils.collect_blown', 'utils.collect_blown', ([], {'semester': 'self.semester'}), '(semester=self.semester)\n', (21528, 21552), False, 'from workshift import utils, signals\n'), ((21679, 21696), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (21688, 21696), False, 'from datetime import timedelta, time, date\n'), ((23091, 23140), 'datetime.timedelta', 'timedelta', ([], {'hours': 'self.p1.verify_cutoff', 'minutes': '(1)'}), '(hours=self.p1.verify_cutoff, minutes=1)\n', (23100, 23140), False, 'from datetime import timedelta, time, date\n'), ((24146, 24180), 'workshift.utils.collect_blown', 'utils.collect_blown', ([], {'moment': 'moment'}), '(moment=moment)\n', (24165, 24180), False, 'from workshift import utils, signals\n'), ((24431, 24436), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (24434, 24436), False, 'from django.utils.timezone import now, localtime\n'), ((27619, 27650), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:list_types"""'], {}), "('workshift:list_types')\n", (27626, 27650), False, 'from django.core.urlresolvers import reverse\n'), ((27664, 27724), 'django.core.urlresolvers.reverse', 'reverse', 
(['"""workshift:view_type"""'], {'kwargs': "{'pk': self.wtype.pk}"}), "('workshift:view_type', kwargs={'pk': self.wtype.pk})\n", (27671, 27724), False, 'from django.core.urlresolvers import reverse\n'), ((27738, 27772), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_semester"""'], {}), "('workshift:view_semester')\n", (27745, 27772), False, 'from django.core.urlresolvers import reverse\n'), ((27786, 27875), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:profile"""'], {'kwargs': "{'targetUsername': self.wprofile.user.username}"}), "('workshift:profile', kwargs={'targetUsername': self.wprofile.user.\n username})\n", (27793, 27875), False, 'from django.core.urlresolvers import reverse\n'), ((27884, 27945), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_shift"""'], {'kwargs': "{'pk': self.shift.pk}"}), "('workshift:view_shift', kwargs={'pk': self.shift.pk})\n", (27891, 27945), False, 'from django.core.urlresolvers import reverse\n'), ((27959, 28026), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_instance"""'], {'kwargs': "{'pk': self.instance.pk}"}), "('workshift:view_instance', kwargs={'pk': self.instance.pk})\n", (27966, 28026), False, 'from django.core.urlresolvers import reverse\n'), ((28040, 28103), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_instance"""'], {'kwargs': "{'pk': self.once.pk}"}), "('workshift:view_instance', kwargs={'pk': self.once.pk})\n", (28047, 28103), False, 'from django.core.urlresolvers import reverse\n'), ((28117, 28147), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_open"""'], {}), "('workshift:view_open')\n", (28124, 28147), False, 'from django.core.urlresolvers import reverse\n'), ((28161, 28195), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:semester_info"""'], {}), "('workshift:semester_info')\n", (28168, 28195), False, 'from django.core.urlresolvers import reverse\n'), ((28209, 28238), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:profiles"""'], {}), "('workshift:profiles')\n", (28216, 28238), False, 'from django.core.urlresolvers import reverse\n'), ((28436, 28471), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:start_semester"""'], {}), "('workshift:start_semester')\n", (28443, 28471), False, 'from django.core.urlresolvers import reverse\n'), ((28485, 28516), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:list_types"""'], {}), "('workshift:list_types')\n", (28492, 28516), False, 'from django.core.urlresolvers import reverse\n'), ((28530, 28590), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_type"""'], {'kwargs': "{'pk': self.wtype.pk}"}), "('workshift:view_type', kwargs={'pk': self.wtype.pk})\n", (28537, 28590), False, 'from django.core.urlresolvers import reverse\n'), ((28604, 28664), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_type"""'], {'kwargs': "{'pk': self.wtype.pk}"}), "('workshift:edit_type', kwargs={'pk': self.wtype.pk})\n", (28611, 28664), False, 'from django.core.urlresolvers import reverse\n'), ((30111, 30139), 'django.core.urlresolvers.reverse', 'reverse', (['name'], {'kwargs': 'kwargs'}), '(name, kwargs=kwargs)\n', (30118, 30139), False, 'from django.core.urlresolvers import reverse\n'), ((30339, 30367), 'django.core.urlresolvers.reverse', 'reverse', (['name'], {'kwargs': 'kwargs'}), '(name, kwargs=kwargs)\n', (30346, 30367), False, 'from django.core.urlresolvers import reverse\n'), ((35956, 35973), 
'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (35965, 35973), False, 'from datetime import timedelta, time, date\n'), ((36001, 36018), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (36010, 36018), False, 'from datetime import timedelta, time, date\n'), ((36571, 36588), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (36580, 36588), False, 'from datetime import timedelta, time, date\n'), ((36616, 36633), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (36625, 36633), False, 'from datetime import timedelta, time, date\n'), ((55999, 56004), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (56002, 56004), False, 'from django.utils.timezone import now, localtime\n'), ((58541, 58576), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:start_semester"""'], {}), "('workshift:start_semester')\n", (58548, 58576), False, 'from django.core.urlresolvers import reverse\n'), ((58590, 58624), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_semester"""'], {}), "('workshift:view_semester')\n", (58597, 58624), False, 'from django.core.urlresolvers import reverse\n'), ((58638, 58672), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:semester_info"""'], {}), "('workshift:semester_info')\n", (58645, 58672), False, 'from django.core.urlresolvers import reverse\n'), ((58686, 58764), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:profile"""'], {'kwargs': "{'targetUsername': self.up.user.username}"}), "('workshift:profile', kwargs={'targetUsername': self.up.user.username})\n", (58693, 58764), False, 'from django.core.urlresolvers import reverse\n'), ((58778, 58866), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_profile"""'], {'kwargs': "{'targetUsername': self.up.user.username}"}), "('workshift:edit_profile', kwargs={'targetUsername': self.up.user.\n username})\n", (58785, 58866), False, 'from django.core.urlresolvers import reverse\n'), ((58875, 58962), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:preferences"""'], {'kwargs': "{'targetUsername': self.up.user.username}"}), "('workshift:preferences', kwargs={'targetUsername': self.up.user.\n username})\n", (58882, 58962), False, 'from django.core.urlresolvers import reverse\n'), ((58971, 58998), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:manage"""'], {}), "('workshift:manage')\n", (58978, 58998), False, 'from django.core.urlresolvers import reverse\n'), ((59012, 59044), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:fill_shifts"""'], {}), "('workshift:fill_shifts')\n", (59019, 59044), False, 'from django.core.urlresolvers import reverse\n'), ((59058, 59092), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:assign_shifts"""'], {}), "('workshift:assign_shifts')\n", (59065, 59092), False, 'from django.core.urlresolvers import reverse\n'), ((59106, 59139), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:adjust_hours"""'], {}), "('workshift:adjust_hours')\n", (59113, 59139), False, 'from django.core.urlresolvers import reverse\n'), ((59153, 59189), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:add_workshifter"""'], {}), "('workshift:add_workshifter')\n", (59160, 59189), False, 'from django.core.urlresolvers import reverse\n'), ((59203, 59233), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:fine_date"""'], {}), "('workshift:fine_date')\n", (59210, 59233), False, 'from 
django.core.urlresolvers import reverse\n'), ((59247, 59277), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:add_shift"""'], {}), "('workshift:add_shift')\n", (59254, 59277), False, 'from django.core.urlresolvers import reverse\n'), ((59291, 59353), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_shift"""'], {'kwargs': "{'pk': self.wshift.pk}"}), "('workshift:edit_shift', kwargs={'pk': self.wshift.pk})\n", (59298, 59353), False, 'from django.core.urlresolvers import reverse\n'), ((59367, 59435), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_instance"""'], {'kwargs': "{'pk': self.winstance.pk}"}), "('workshift:edit_instance', kwargs={'pk': self.winstance.pk})\n", (59374, 59435), False, 'from django.core.urlresolvers import reverse\n'), ((59449, 59509), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_type"""'], {'kwargs': "{'pk': self.wtype.pk}"}), "('workshift:edit_type', kwargs={'pk': self.wtype.pk})\n", (59456, 59509), False, 'from django.core.urlresolvers import reverse\n'), ((59523, 59585), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_shift"""'], {'kwargs': "{'pk': self.mshift.pk}"}), "('workshift:edit_shift', kwargs={'pk': self.mshift.pk})\n", (59530, 59585), False, 'from django.core.urlresolvers import reverse\n'), ((59599, 59667), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_instance"""'], {'kwargs': "{'pk': self.minstance.pk}"}), "('workshift:edit_instance', kwargs={'pk': self.minstance.pk})\n", (59606, 59667), False, 'from django.core.urlresolvers import reverse\n'), ((59681, 59741), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_type"""'], {'kwargs': "{'pk': self.mtype.pk}"}), "('workshift:edit_type', kwargs={'pk': self.mtype.pk})\n", (59688, 59741), False, 'from django.core.urlresolvers import reverse\n'), ((66952, 66979), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:manage"""'], {}), "('workshift:manage')\n", (66959, 66979), False, 'from django.core.urlresolvers import reverse\n'), ((68310, 68344), 'django.core.urlresolvers.reverse', 'reverse', (['"""manage_profile_requests"""'], {}), "('manage_profile_requests')\n", (68317, 68344), False, 'from django.core.urlresolvers import reverse\n'), ((69459, 69493), 'django.core.urlresolvers.reverse', 'reverse', (['"""manage_profile_requests"""'], {}), "('manage_profile_requests')\n", (69466, 69493), False, 'from django.core.urlresolvers import reverse\n'), ((70615, 70649), 'django.core.urlresolvers.reverse', 'reverse', (['"""manage_profile_requests"""'], {}), "('manage_profile_requests')\n", (70622, 70649), False, 'from django.core.urlresolvers import reverse\n'), ((71259, 71286), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:manage"""'], {}), "('workshift:manage')\n", (71266, 71286), False, 'from django.core.urlresolvers import reverse\n'), ((73954, 73981), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:manage"""'], {}), "('workshift:manage')\n", (73961, 73981), False, 'from django.core.urlresolvers import reverse\n'), ((74188, 74205), 'datetime.date', 'date', (['(2014)', '(5)', '(27)'], {}), '(2014, 5, 27)\n', (74192, 74205), False, 'from datetime import timedelta, time, date\n'), ((75936, 75953), 'datetime.date', 'date', (['(2014)', '(5)', '(27)'], {}), '(2014, 5, 27)\n', (75940, 75953), False, 'from datetime import timedelta, time, date\n'), ((77436, 77450), 'datetime.time', 'time', (['(14)', '(0)', '(0)'], {}), '(14, 0, 0)\n', (77440, 
77450), False, 'from datetime import timedelta, time, date\n'), ((77496, 77510), 'datetime.time', 'time', (['(16)', '(0)', '(0)'], {}), '(16, 0, 0)\n', (77500, 77510), False, 'from datetime import timedelta, time, date\n'), ((77552, 77569), 'datetime.date', 'date', (['(2014)', '(5)', '(27)'], {}), '(2014, 5, 27)\n', (77556, 77569), False, 'from datetime import timedelta, time, date\n'), ((78039, 78066), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:manage"""'], {}), "('workshift:manage')\n", (78046, 78066), False, 'from django.core.urlresolvers import reverse\n'), ((79206, 79233), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:manage"""'], {}), "('workshift:manage')\n", (79213, 79233), False, 'from django.core.urlresolvers import reverse\n'), ((79519, 79533), 'datetime.time', 'time', (['(18)', '(0)', '(0)'], {}), '(18, 0, 0)\n', (79523, 79533), False, 'from datetime import timedelta, time, date\n'), ((79581, 79595), 'datetime.time', 'time', (['(20)', '(0)', '(0)'], {}), '(20, 0, 0)\n', (79585, 79595), False, 'from datetime import timedelta, time, date\n'), ((79641, 79658), 'datetime.date', 'date', (['(2014)', '(5)', '(27)'], {}), '(2014, 5, 27)\n', (79645, 79658), False, 'from datetime import timedelta, time, date\n'), ((81130, 81144), 'datetime.time', 'time', (['(14)', '(0)', '(0)'], {}), '(14, 0, 0)\n', (81134, 81144), False, 'from datetime import timedelta, time, date\n'), ((81192, 81206), 'datetime.time', 'time', (['(16)', '(0)', '(0)'], {}), '(16, 0, 0)\n', (81196, 81206), False, 'from datetime import timedelta, time, date\n'), ((81252, 81269), 'datetime.date', 'date', (['(2014)', '(5)', '(27)'], {}), '(2014, 5, 27)\n', (81256, 81269), False, 'from datetime import timedelta, time, date\n'), ((81747, 81774), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:manage"""'], {}), "('workshift:manage')\n", (81754, 81774), False, 'from django.core.urlresolvers import reverse\n'), ((83624, 83632), 'datetime.time', 'time', (['(16)'], {}), '(16)\n', (83628, 83632), False, 'from datetime import timedelta, time, date\n'), ((83677, 83685), 'datetime.time', 'time', (['(18)'], {}), '(18)\n', (83681, 83685), False, 'from datetime import timedelta, time, date\n'), ((84166, 84193), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:manage"""'], {}), "('workshift:manage')\n", (84173, 84193), False, 'from django.core.urlresolvers import reverse\n'), ((85267, 85294), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:manage"""'], {}), "('workshift:manage')\n", (85274, 85294), False, 'from django.core.urlresolvers import reverse\n'), ((87417, 87436), 'datetime.timedelta', 'timedelta', ([], {'days': '(365)'}), '(days=365)\n', (87426, 87436), False, 'from datetime import timedelta, time, date\n'), ((89275, 89307), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:fill_shifts"""'], {}), "('workshift:fill_shifts')\n", (89282, 89307), False, 'from django.core.urlresolvers import reverse\n'), ((90237, 90264), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:manage"""'], {}), "('workshift:manage')\n", (90244, 90264), False, 'from django.core.urlresolvers import reverse\n'), ((91602, 91637), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:start_semester"""'], {}), "('workshift:start_semester')\n", (91609, 91637), False, 'from django.core.urlresolvers import reverse\n'), ((1294, 1331), 'base.models.UserProfile.objects.get', 'UserProfile.objects.get', ([], {'user': 'self.wu'}), '(user=self.wu)\n', (1317, 
1331), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((2454, 2471), 'datetime.date', 'date', (['(2014)', '(5)', '(22)'], {}), '(2014, 5, 22)\n', (2458, 2471), False, 'from datetime import timedelta, time, date\n'), ((2506, 2523), 'datetime.date', 'date', (['(2014)', '(8)', '(15)'], {}), '(2014, 8, 15)\n', (2510, 2523), False, 'from datetime import timedelta, time, date\n'), ((16309, 16330), 'workshift.cron.CollectBlownCronJob', 'CollectBlownCronJob', ([], {}), '()\n', (16328, 16330), False, 'from workshift.cron import CollectBlownCronJob, UpdateWeeklyStandings\n'), ((16380, 16403), 'workshift.cron.UpdateWeeklyStandings', 'UpdateWeeklyStandings', ([], {}), '()\n', (16401, 16403), False, 'from workshift.cron import CollectBlownCronJob, UpdateWeeklyStandings\n'), ((17602, 17619), 'datetime.date', 'date', (['(2014)', '(1)', '(20)'], {}), '(2014, 1, 20)\n', (17606, 17619), False, 'from datetime import timedelta, time, date\n'), ((17621, 17638), 'datetime.date', 'date', (['(2014)', '(5)', '(17)'], {}), '(2014, 5, 17)\n', (17625, 17638), False, 'from datetime import timedelta, time, date\n'), ((17759, 17776), 'datetime.date', 'date', (['(2014)', '(5)', '(25)'], {}), '(2014, 5, 25)\n', (17763, 17776), False, 'from datetime import timedelta, time, date\n'), ((17778, 17795), 'datetime.date', 'date', (['(2014)', '(8)', '(16)'], {}), '(2014, 8, 16)\n', (17782, 17795), False, 'from datetime import timedelta, time, date\n'), ((17916, 17933), 'datetime.date', 'date', (['(2014)', '(8)', '(24)'], {}), '(2014, 8, 24)\n', (17920, 17933), False, 'from datetime import timedelta, time, date\n'), ((17935, 17953), 'datetime.date', 'date', (['(2014)', '(12)', '(20)'], {}), '(2014, 12, 20)\n', (17939, 17953), False, 'from datetime import timedelta, time, date\n'), ((24987, 25024), 'base.models.UserProfile.objects.get', 'UserProfile.objects.get', ([], {'user': 'self.wu'}), '(user=self.wu)\n', (25010, 25024), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((40068, 40105), 'base.models.UserProfile.objects.get', 'UserProfile.objects.get', ([], {'user': 'self.wu'}), '(user=self.wu)\n', (40091, 40105), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((43975, 43988), 'datetime.time', 'time', (['(8)', '(0)', '(0)'], {}), '(8, 0, 0)\n', (43979, 43988), False, 'from datetime import timedelta, time, date\n'), ((43990, 44004), 'datetime.time', 'time', (['(16)', '(0)', '(0)'], {}), '(16, 0, 0)\n', (43994, 44004), False, 'from datetime import timedelta, time, date\n'), ((44006, 44020), 'datetime.time', 'time', (['(18)', '(0)', '(0)'], {}), '(18, 0, 0)\n', (44010, 44020), False, 'from datetime import timedelta, time, date\n'), ((44040, 44054), 'datetime.time', 'time', (['(17)', '(0)', '(0)'], {}), '(17, 0, 0)\n', (44044, 44054), False, 'from datetime import timedelta, time, date\n'), ((44056, 44070), 'datetime.time', 'time', (['(21)', '(0)', '(0)'], {}), '(21, 0, 0)\n', (44060, 44070), False, 'from datetime import timedelta, time, date\n'), ((44072, 44086), 'datetime.time', 'time', (['(22)', '(0)', '(0)'], {}), '(22, 0, 0)\n', (44076, 44086), False, 'from datetime import timedelta, time, date\n'), ((48588, 48625), 'base.models.UserProfile.objects.get', 'UserProfile.objects.get', ([], {'user': 'self.wu'}), '(user=self.wu)\n', (48611, 48625), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((49608, 49624), 'datetime.time', 'time', (['(23)', '(59)', '(59)'], {}), '(23, 59, 59)\n', (49612, 49624), False, 'from datetime import timedelta, 
time, date\n'), ((55731, 55768), 'base.models.UserProfile.objects.get', 'UserProfile.objects.get', ([], {'user': 'self.wu'}), '(user=self.wu)\n', (55754, 55768), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((55922, 55959), 'base.models.UserProfile.objects.get', 'UserProfile.objects.get', ([], {'user': 'self.mu'}), '(user=self.mu)\n', (55945, 55959), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((60051, 60086), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:start_semester"""'], {}), "('workshift:start_semester')\n", (60058, 60086), False, 'from django.core.urlresolvers import reverse\n'), ((60108, 60142), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_semester"""'], {}), "('workshift:view_semester')\n", (60115, 60142), False, 'from django.core.urlresolvers import reverse\n'), ((60164, 60198), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:semester_info"""'], {}), "('workshift:semester_info')\n", (60171, 60198), False, 'from django.core.urlresolvers import reverse\n'), ((60220, 60298), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:profile"""'], {'kwargs': "{'targetUsername': self.up.user.username}"}), "('workshift:profile', kwargs={'targetUsername': self.up.user.username})\n", (60227, 60298), False, 'from django.core.urlresolvers import reverse\n'), ((60321, 60408), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:preferences"""'], {'kwargs': "{'targetUsername': self.up.user.username}"}), "('workshift:preferences', kwargs={'targetUsername': self.up.user.\n username})\n", (60328, 60408), False, 'from django.core.urlresolvers import reverse\n'), ((60425, 60452), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:manage"""'], {}), "('workshift:manage')\n", (60432, 60452), False, 'from django.core.urlresolvers import reverse\n'), ((60474, 60506), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:fill_shifts"""'], {}), "('workshift:fill_shifts')\n", (60481, 60506), False, 'from django.core.urlresolvers import reverse\n'), ((60528, 60562), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:assign_shifts"""'], {}), "('workshift:assign_shifts')\n", (60535, 60562), False, 'from django.core.urlresolvers import reverse\n'), ((60585, 60618), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:adjust_hours"""'], {}), "('workshift:adjust_hours')\n", (60592, 60618), False, 'from django.core.urlresolvers import reverse\n'), ((60641, 60677), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:add_workshifter"""'], {}), "('workshift:add_workshifter')\n", (60648, 60677), False, 'from django.core.urlresolvers import reverse\n'), ((60699, 60729), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:add_shift"""'], {}), "('workshift:add_shift')\n", (60706, 60729), False, 'from django.core.urlresolvers import reverse\n'), ((60751, 60781), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:fine_date"""'], {}), "('workshift:fine_date')\n", (60758, 60781), False, 'from django.core.urlresolvers import reverse\n'), ((60804, 60866), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_shift"""'], {'kwargs': "{'pk': self.wshift.pk}"}), "('workshift:edit_shift', kwargs={'pk': self.wshift.pk})\n", (60811, 60866), False, 'from django.core.urlresolvers import reverse\n'), ((60889, 60957), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_instance"""'], {'kwargs': "{'pk': 
self.winstance.pk}"}), "('workshift:edit_instance', kwargs={'pk': self.winstance.pk})\n", (60896, 60957), False, 'from django.core.urlresolvers import reverse\n'), ((60980, 61040), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_type"""'], {'kwargs': "{'pk': self.wtype.pk}"}), "('workshift:edit_type', kwargs={'pk': self.wtype.pk})\n", (60987, 61040), False, 'from django.core.urlresolvers import reverse\n'), ((61062, 61124), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_shift"""'], {'kwargs': "{'pk': self.mshift.pk}"}), "('workshift:edit_shift', kwargs={'pk': self.mshift.pk})\n", (61069, 61124), False, 'from django.core.urlresolvers import reverse\n'), ((61146, 61214), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_instance"""'], {'kwargs': "{'pk': self.minstance.pk}"}), "('workshift:edit_instance', kwargs={'pk': self.minstance.pk})\n", (61153, 61214), False, 'from django.core.urlresolvers import reverse\n'), ((61236, 61296), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_type"""'], {'kwargs': "{'pk': self.mtype.pk}"}), "('workshift:edit_type', kwargs={'pk': self.mtype.pk})\n", (61243, 61296), False, 'from django.core.urlresolvers import reverse\n'), ((61794, 61829), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:start_semester"""'], {}), "('workshift:start_semester')\n", (61801, 61829), False, 'from django.core.urlresolvers import reverse\n'), ((61851, 61885), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_semester"""'], {}), "('workshift:view_semester')\n", (61858, 61885), False, 'from django.core.urlresolvers import reverse\n'), ((61907, 61941), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:semester_info"""'], {}), "('workshift:semester_info')\n", (61914, 61941), False, 'from django.core.urlresolvers import reverse\n'), ((61963, 62041), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:profile"""'], {'kwargs': "{'targetUsername': self.up.user.username}"}), "('workshift:profile', kwargs={'targetUsername': self.up.user.username})\n", (61970, 62041), False, 'from django.core.urlresolvers import reverse\n'), ((62064, 62152), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_profile"""'], {'kwargs': "{'targetUsername': self.up.user.username}"}), "('workshift:edit_profile', kwargs={'targetUsername': self.up.user.\n username})\n", (62071, 62152), False, 'from django.core.urlresolvers import reverse\n'), ((62169, 62256), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:preferences"""'], {'kwargs': "{'targetUsername': self.up.user.username}"}), "('workshift:preferences', kwargs={'targetUsername': self.up.user.\n username})\n", (62176, 62256), False, 'from django.core.urlresolvers import reverse\n'), ((62274, 62301), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:manage"""'], {}), "('workshift:manage')\n", (62281, 62301), False, 'from django.core.urlresolvers import reverse\n'), ((62324, 62356), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:fill_shifts"""'], {}), "('workshift:fill_shifts')\n", (62331, 62356), False, 'from django.core.urlresolvers import reverse\n'), ((62379, 62413), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:assign_shifts"""'], {}), "('workshift:assign_shifts')\n", (62386, 62413), False, 'from django.core.urlresolvers import reverse\n'), ((62436, 62469), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:adjust_hours"""'], {}), 
"('workshift:adjust_hours')\n", (62443, 62469), False, 'from django.core.urlresolvers import reverse\n'), ((62492, 62528), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:add_workshifter"""'], {}), "('workshift:add_workshifter')\n", (62499, 62528), False, 'from django.core.urlresolvers import reverse\n'), ((62551, 62581), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:add_shift"""'], {}), "('workshift:add_shift')\n", (62558, 62581), False, 'from django.core.urlresolvers import reverse\n'), ((62604, 62634), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:fine_date"""'], {}), "('workshift:fine_date')\n", (62611, 62634), False, 'from django.core.urlresolvers import reverse\n'), ((62657, 62719), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_shift"""'], {'kwargs': "{'pk': self.wshift.pk}"}), "('workshift:edit_shift', kwargs={'pk': self.wshift.pk})\n", (62664, 62719), False, 'from django.core.urlresolvers import reverse\n'), ((62742, 62810), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_instance"""'], {'kwargs': "{'pk': self.winstance.pk}"}), "('workshift:edit_instance', kwargs={'pk': self.winstance.pk})\n", (62749, 62810), False, 'from django.core.urlresolvers import reverse\n'), ((62833, 62893), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_type"""'], {'kwargs': "{'pk': self.wtype.pk}"}), "('workshift:edit_type', kwargs={'pk': self.wtype.pk})\n", (62840, 62893), False, 'from django.core.urlresolvers import reverse\n'), ((62916, 62978), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_shift"""'], {'kwargs': "{'pk': self.mshift.pk}"}), "('workshift:edit_shift', kwargs={'pk': self.mshift.pk})\n", (62923, 62978), False, 'from django.core.urlresolvers import reverse\n'), ((63001, 63069), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_instance"""'], {'kwargs': "{'pk': self.minstance.pk}"}), "('workshift:edit_instance', kwargs={'pk': self.minstance.pk})\n", (63008, 63069), False, 'from django.core.urlresolvers import reverse\n'), ((63092, 63152), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_type"""'], {'kwargs': "{'pk': self.mtype.pk}"}), "('workshift:edit_type', kwargs={'pk': self.mtype.pk})\n", (63099, 63152), False, 'from django.core.urlresolvers import reverse\n'), ((63657, 63692), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:start_semester"""'], {}), "('workshift:start_semester')\n", (63664, 63692), False, 'from django.core.urlresolvers import reverse\n'), ((63714, 63748), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_semester"""'], {}), "('workshift:view_semester')\n", (63721, 63748), False, 'from django.core.urlresolvers import reverse\n'), ((63770, 63804), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:semester_info"""'], {}), "('workshift:semester_info')\n", (63777, 63804), False, 'from django.core.urlresolvers import reverse\n'), ((63826, 63904), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:profile"""'], {'kwargs': "{'targetUsername': self.up.user.username}"}), "('workshift:profile', kwargs={'targetUsername': self.up.user.username})\n", (63833, 63904), False, 'from django.core.urlresolvers import reverse\n'), ((63927, 64015), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_profile"""'], {'kwargs': "{'targetUsername': self.up.user.username}"}), "('workshift:edit_profile', kwargs={'targetUsername': self.up.user.\n username})\n", 
(63934, 64015), False, 'from django.core.urlresolvers import reverse\n'), ((64033, 64120), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:preferences"""'], {'kwargs': "{'targetUsername': self.up.user.username}"}), "('workshift:preferences', kwargs={'targetUsername': self.up.user.\n username})\n", (64040, 64120), False, 'from django.core.urlresolvers import reverse\n'), ((64138, 64165), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:manage"""'], {}), "('workshift:manage')\n", (64145, 64165), False, 'from django.core.urlresolvers import reverse\n'), ((64188, 64220), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:fill_shifts"""'], {}), "('workshift:fill_shifts')\n", (64195, 64220), False, 'from django.core.urlresolvers import reverse\n'), ((64243, 64277), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:assign_shifts"""'], {}), "('workshift:assign_shifts')\n", (64250, 64277), False, 'from django.core.urlresolvers import reverse\n'), ((64300, 64333), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:adjust_hours"""'], {}), "('workshift:adjust_hours')\n", (64307, 64333), False, 'from django.core.urlresolvers import reverse\n'), ((64356, 64392), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:add_workshifter"""'], {}), "('workshift:add_workshifter')\n", (64363, 64392), False, 'from django.core.urlresolvers import reverse\n'), ((64415, 64445), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:add_shift"""'], {}), "('workshift:add_shift')\n", (64422, 64445), False, 'from django.core.urlresolvers import reverse\n'), ((64468, 64498), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:fine_date"""'], {}), "('workshift:fine_date')\n", (64475, 64498), False, 'from django.core.urlresolvers import reverse\n'), ((64521, 64583), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_shift"""'], {'kwargs': "{'pk': self.wshift.pk}"}), "('workshift:edit_shift', kwargs={'pk': self.wshift.pk})\n", (64528, 64583), False, 'from django.core.urlresolvers import reverse\n'), ((64606, 64674), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_instance"""'], {'kwargs': "{'pk': self.winstance.pk}"}), "('workshift:edit_instance', kwargs={'pk': self.winstance.pk})\n", (64613, 64674), False, 'from django.core.urlresolvers import reverse\n'), ((64697, 64757), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_type"""'], {'kwargs': "{'pk': self.wtype.pk}"}), "('workshift:edit_type', kwargs={'pk': self.wtype.pk})\n", (64704, 64757), False, 'from django.core.urlresolvers import reverse\n'), ((64780, 64842), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_shift"""'], {'kwargs': "{'pk': self.mshift.pk}"}), "('workshift:edit_shift', kwargs={'pk': self.mshift.pk})\n", (64787, 64842), False, 'from django.core.urlresolvers import reverse\n'), ((64865, 64933), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_instance"""'], {'kwargs': "{'pk': self.minstance.pk}"}), "('workshift:edit_instance', kwargs={'pk': self.minstance.pk})\n", (64872, 64933), False, 'from django.core.urlresolvers import reverse\n'), ((64956, 65016), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:edit_type"""'], {'kwargs': "{'pk': self.mtype.pk}"}), "('workshift:edit_type', kwargs={'pk': self.mtype.pk})\n", (64963, 65016), False, 'from django.core.urlresolvers import reverse\n'), ((66308, 66345), 'base.models.UserProfile.objects.get', 
'UserProfile.objects.get', ([], {'user': 'self.ru'}), '(user=self.ru)\n', (66331, 66345), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((72201, 72238), 'base.models.UserProfile.objects.get', 'UserProfile.objects.get', ([], {'user': 'self.wu'}), '(user=self.wu)\n', (72224, 72238), False, 'from base.models import User, UserProfile, ProfileRequest\n'), ((73008, 73016), 'datetime.time', 'time', (['(16)'], {}), '(16)\n', (73012, 73016), False, 'from datetime import timedelta, time, date\n'), ((73039, 73047), 'datetime.time', 'time', (['(18)'], {}), '(18)\n', (73043, 73047), False, 'from datetime import timedelta, time, date\n'), ((73775, 73792), 'datetime.date', 'date', (['(2014)', '(5)', '(27)'], {}), '(2014, 5, 27)\n', (73779, 73792), False, 'from datetime import timedelta, time, date\n'), ((75036, 75053), 'datetime.date', 'date', (['(2014)', '(5)', '(27)'], {}), '(2014, 5, 27)\n', (75040, 75053), False, 'from datetime import timedelta, time, date\n'), ((76649, 76666), 'datetime.date', 'date', (['(2014)', '(5)', '(27)'], {}), '(2014, 5, 27)\n', (76653, 76666), False, 'from datetime import timedelta, time, date\n'), ((78947, 78964), 'datetime.date', 'date', (['(2014)', '(5)', '(27)'], {}), '(2014, 5, 27)\n', (78951, 78964), False, 'from datetime import timedelta, time, date\n'), ((80371, 80388), 'datetime.date', 'date', (['(2014)', '(5)', '(27)'], {}), '(2014, 5, 27)\n', (80375, 80388), False, 'from datetime import timedelta, time, date\n'), ((88697, 88729), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:fill_shifts"""'], {}), "('workshift:fill_shifts')\n", (88704, 88729), False, 'from django.core.urlresolvers import reverse\n'), ((1672, 1688), 'django.core.urlresolvers.reverse', 'reverse', (['"""login"""'], {}), "('login')\n", (1679, 1688), False, 'from django.core.urlresolvers import reverse\n'), ((3859, 3864), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (3862, 3864), False, 'from django.utils.timezone import now, localtime\n'), ((4010, 4027), 'datetime.timedelta', 'timedelta', ([], {'days': '(6)'}), '(days=6)\n', (4019, 4027), False, 'from datetime import timedelta, time, date\n'), ((15636, 15641), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (15639, 15641), False, 'from django.utils.timezone import now, localtime\n'), ((15823, 15842), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(18)'}), '(weeks=18)\n', (15832, 15842), False, 'from datetime import timedelta, time, date\n'), ((16827, 16845), 'datetime.date', 'date', (['(2014)', '(12)', '(20)'], {}), '(2014, 12, 20)\n', (16831, 16845), False, 'from datetime import timedelta, time, date\n'), ((16963, 16980), 'datetime.date', 'date', (['(2015)', '(3)', '(20)'], {}), '(2015, 3, 20)\n', (16967, 16980), False, 'from datetime import timedelta, time, date\n'), ((17098, 17114), 'datetime.date', 'date', (['(2014)', '(4)', '(1)'], {}), '(2014, 4, 1)\n', (17102, 17114), False, 'from datetime import timedelta, time, date\n'), ((17232, 17249), 'datetime.date', 'date', (['(2014)', '(7)', '(20)'], {}), '(2014, 7, 20)\n', (17236, 17249), False, 'from datetime import timedelta, time, date\n'), ((17365, 17381), 'datetime.date', 'date', (['(2014)', '(8)', '(1)'], {}), '(2014, 8, 1)\n', (17369, 17381), False, 'from datetime import timedelta, time, date\n'), ((17497, 17515), 'datetime.date', 'date', (['(2014)', '(10)', '(20)'], {}), '(2014, 10, 20)\n', (17501, 17515), False, 'from datetime import timedelta, time, date\n'), ((20412, 20417), 'django.utils.timezone.now', 'now', ([], {}), '()\n', 
(20415, 20417), False, 'from django.utils.timezone import now, localtime\n'), ((21596, 21601), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (21599, 21601), False, 'from django.utils.timezone import now, localtime\n'), ((24600, 24617), 'datetime.timedelta', 'timedelta', ([], {'days': '(6)'}), '(days=6)\n', (24609, 24617), False, 'from datetime import timedelta, time, date\n'), ((25668, 25686), 'datetime.timedelta', 'timedelta', ([], {'hours': '(2)'}), '(hours=2)\n', (25677, 25686), False, 'from datetime import timedelta, time, date\n'), ((26135, 26152), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (26144, 26152), False, 'from datetime import timedelta, time, date\n'), ((40180, 40185), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (40183, 40185), False, 'from django.utils.timezone import now, localtime\n'), ((40314, 40331), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (40323, 40331), False, 'from datetime import timedelta, time, date\n'), ((48700, 48705), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (48703, 48705), False, 'from django.utils.timezone import now, localtime\n'), ((48834, 48851), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (48843, 48851), False, 'from datetime import timedelta, time, date\n'), ((49579, 49584), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (49582, 49584), False, 'from django.utils.timezone import now, localtime\n'), ((56156, 56173), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (56165, 56173), False, 'from datetime import timedelta, time, date\n'), ((57112, 57130), 'datetime.timedelta', 'timedelta', ([], {'hours': '(2)'}), '(hours=2)\n', (57121, 57130), False, 'from datetime import timedelta, time, date\n'), ((57364, 57382), 'datetime.timedelta', 'timedelta', ([], {'hours': '(2)'}), '(hours=2)\n', (57373, 57382), False, 'from datetime import timedelta, time, date\n'), ((61543, 61577), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_semester"""'], {}), "('workshift:view_semester')\n", (61550, 61577), False, 'from django.core.urlresolvers import reverse\n'), ((63399, 63433), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_semester"""'], {}), "('workshift:view_semester')\n", (63406, 63433), False, 'from django.core.urlresolvers import reverse\n'), ((65264, 65298), 'django.core.urlresolvers.reverse', 'reverse', (['"""workshift:view_semester"""'], {}), "('workshift:view_semester')\n", (65271, 65298), False, 'from django.core.urlresolvers import reverse\n'), ((66552, 66557), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (66555, 66557), False, 'from django.utils.timezone import now, localtime\n'), ((66867, 66886), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(18)'}), '(weeks=18)\n', (66876, 66886), False, 'from datetime import timedelta, time, date\n'), ((72313, 72318), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (72316, 72318), False, 'from django.utils.timezone import now, localtime\n'), ((72453, 72470), 'datetime.timedelta', 'timedelta', ([], {'days': '(6)'}), '(days=6)\n', (72462, 72470), False, 'from datetime import timedelta, time, date\n'), ((87375, 87380), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (87378, 87380), False, 'from django.utils.timezone import now, localtime\n'), ((87580, 87597), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (87589, 87597), False, 'from datetime import timedelta, time, date\n'), ((87729, 87746), 
'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (87738, 87746), False, 'from datetime import timedelta, time, date\n'), ((89092, 89115), 'managers.models.Manager.objects.count', 'Manager.objects.count', ([], {}), '()\n', (89113, 89115), False, 'from managers.models import Manager\n'), ((89805, 89810), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (89808, 89810), False, 'from django.utils.timezone import now, localtime\n'), ((90118, 90137), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(18)'}), '(weeks=18)\n', (90127, 90137), False, 'from datetime import timedelta, time, date\n'), ((16538, 16543), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (16541, 16543), False, 'from django.utils.timezone import now, localtime\n')] |
import json
from packlib.base import ProxmoxAction
class ClusterCephFlagsSetFlagsAction(ProxmoxAction):
"""
Set/Unset multiple ceph flags at once.
"""
def run(
self,
nobackfill=None,
nodeep_scrub=None,
nodown=None,
noin=None,
noout=None,
norebalance=None,
norecover=None,
noscrub=None,
notieragent=None,
noup=None,
pause=None,
profile_name=None,
):
super().run(profile_name)
# Only include non None arguments to pass through to proxmox api.
proxmox_kwargs = {}
for api_arg in [
["nobackfill", nobackfill, "boolean"],
["nodeep-scrub", nodeep_scrub, "boolean"],
["nodown", nodown, "boolean"],
["noin", noin, "boolean"],
["noout", noout, "boolean"],
["norebalance", norebalance, "boolean"],
["norecover", norecover, "boolean"],
["noscrub", noscrub, "boolean"],
["notieragent", notieragent, "boolean"],
["noup", noup, "boolean"],
["pause", pause, "boolean"],
]:
if api_arg[1] is None:
continue
if "[n]" in api_arg[0]:
unit_list = json.loads(api_arg[1])
for i, v in enumerate(unit_list):
proxmox_kwargs[api_arg[0].replace("[n]", str(i))] = v
else:
if api_arg[2] == "boolean":
api_arg[1] = int(api_arg[1])
proxmox_kwargs[api_arg[0]] = api_arg[1]
return self.proxmox.put(f"cluster/ceph/flags", **proxmox_kwargs)
| [
"json.loads"
] | [((1282, 1304), 'json.loads', 'json.loads', (['api_arg[1]'], {}), '(api_arg[1])\n', (1292, 1304), False, 'import json\n')] |
import json
import os
import pickledb
import requests
TRACKING_NUMBER = os.getenv("TRACKING_NUMBER")
TELEGRAM_BOT_TOKEN = os.getenv("TELEGRAM_BOT_TOKEN")
TELEGRAM_CHAT_ID = os.getenv("TELEGRAM_CHAT_ID")
USER_AGENT = os.getenv("USER_AGENT")
def main():
db = pickledb.load("state.db", auto_dump=True)
cainiao_response = requests.get(
f"https://slw16.global.cainiao.com/trackRefreshRpc/refresh.json?mailNo={TRACKING_NUMBER}",
headers={
"User-Agent": USER_AGENT,
},
)
data = json.loads(cainiao_response.text[1:-1])
db_data = db.get(TRACKING_NUMBER)
if db_data and db_data.get("status") == data.get("status"):
# nothing changed, exit
return
# status changed, store and notify
db.set(TRACKING_NUMBER, data)
requests.post(
f"https://api.telegram.org/bot{TELEGRAM_BOT_TOKEN}/sendMessage",
json={"chat_id": TELEGRAM_CHAT_ID, "text": f"{TRACKING_NUMBER}: {data.get('status')}"},
)
if __name__ == "__main__":
main()
| [
"pickledb.load",
"json.loads",
"os.getenv",
"requests.get"
] | [((74, 102), 'os.getenv', 'os.getenv', (['"""TRACKING_NUMBER"""'], {}), "('TRACKING_NUMBER')\n", (83, 102), False, 'import os\n'), ((124, 155), 'os.getenv', 'os.getenv', (['"""TELEGRAM_BOT_TOKEN"""'], {}), "('TELEGRAM_BOT_TOKEN')\n", (133, 155), False, 'import os\n'), ((175, 204), 'os.getenv', 'os.getenv', (['"""TELEGRAM_CHAT_ID"""'], {}), "('TELEGRAM_CHAT_ID')\n", (184, 204), False, 'import os\n'), ((218, 241), 'os.getenv', 'os.getenv', (['"""USER_AGENT"""'], {}), "('USER_AGENT')\n", (227, 241), False, 'import os\n'), ((265, 306), 'pickledb.load', 'pickledb.load', (['"""state.db"""'], {'auto_dump': '(True)'}), "('state.db', auto_dump=True)\n", (278, 306), False, 'import pickledb\n'), ((330, 479), 'requests.get', 'requests.get', (['f"""https://slw16.global.cainiao.com/trackRefreshRpc/refresh.json?mailNo={TRACKING_NUMBER}"""'], {'headers': "{'User-Agent': USER_AGENT}"}), "(\n f'https://slw16.global.cainiao.com/trackRefreshRpc/refresh.json?mailNo={TRACKING_NUMBER}'\n , headers={'User-Agent': USER_AGENT})\n", (342, 479), False, 'import requests\n'), ((527, 566), 'json.loads', 'json.loads', (['cainiao_response.text[1:-1]'], {}), '(cainiao_response.text[1:-1])\n', (537, 566), False, 'import json\n')] |
from cartoon.cli import main as main_func
import sys
if __name__ == "__main__":
main_func(sys.kwargs) | [
"cartoon.cli.main"
] | [((85, 106), 'cartoon.cli.main', 'main_func', (['sys.kwargs'], {}), '(sys.kwargs)\n', (94, 106), True, 'from cartoon.cli import main as main_func\n')] |
import numpy as np
class ActivationFunction(object):
@staticmethod
def activation(input_data, activation_name = 'relu'):
"""
activation function
Parameters
----------
input_data : {array-like, tensor(3-dim)} of shape (in_data_col, in_data_row, in_data_depth)
if activation is softmax, the input data is vector
activation_name : {string-like, scalar} value of {'sigmoid', 'tanh', 'relu'(default), 'softmax', 'none'}
Returns
-------
input_data : {array-like, tensor(3-dim)} of shape (in_data_col, in_data_row, in_data_depth)
after activation (origin data shape)
"""
if activation_name == 'sigmoid':
return 1.0/(1.0 + np.exp(-input_data))
elif activation_name == 'tanh':
return np.tanh(input_data)
elif activation_name == 'relu':
return (np.abs(input_data) + input_data)/2
elif activation_name == 'softmax': # input data is vector
input_data = input_data - np.max(input_data)
exp_input_data = np.exp(input_data)
return exp_input_data / np.sum(exp_input_data)
elif activation_name == 'none': # not use any activation
return input_data
else:
raise AttributeError("activation name wrong")
@staticmethod
def activation_prime(input_data, activation_name = 'relu'):
"""
activation function derivative
Parameters
----------
input_data : {array-like, tensor(3-dim)} of shape (in_data_col, in_data_row, in_data_depth)
activation_name : {string-like, scalar} value of {'sigmoid', 'tanh', 'relu'(default), 'softmax', 'none'}
Returns
-------
result : {array-like, tensor(3-dim)} of shape (in_data_col, in_data_row, in_data_depth)
activation derivative
"""
if activation_name == 'sigmoid':
return ActivationFunction.activation(activation_name=activation_name, input_data=input_data) * (1 - ActivationFunction.activation( activation_name=activation_name, input_data=input_data))
elif activation_name == 'tanh':
return 1 - np.square( ActivationFunction.activation(activation_name=activation_name, input_data = input_data))
elif activation_name == 'relu':
return np.where( input_data > 0, 1, 0)
elif activation_name == 'softmax':
input_data = np.squeeze(input_data)
length = len(input_data)
result = np.zeros((length, length))
for i in range(length):
for j in range(length):
result[i, j] = ActivationFunction.__softmax(i = i, j = j, a = input_data)
return result
elif activation_name == 'none':
return 1
else:
raise AttributeError("activation name wrong")
@staticmethod
def __softmax(i, j, a):
if i == j:
return a[i] * (1 - a[i])
else:
return -a[i] * a[j]
# input_data_test = np.array(
# [
# [
# [1, 3, 5, 4, 7],
# [2, 3, 2, 1, 0],
# [7, 8, 1, 2, 3],
# [3, 2, 9, 8, 7],
# [2, 3, -4, 0, 2]
# ]
# ]
# )
# print(ActivationFunction.activation_prime(activation_name='tanh', input_data=input_data_test)) | [
"numpy.abs",
"numpy.where",
"numpy.tanh",
"numpy.squeeze",
"numpy.exp",
"numpy.max",
"numpy.zeros",
"numpy.sum"
] | [((875, 894), 'numpy.tanh', 'np.tanh', (['input_data'], {}), '(input_data)\n', (882, 894), True, 'import numpy as np\n'), ((786, 805), 'numpy.exp', 'np.exp', (['(-input_data)'], {}), '(-input_data)\n', (792, 805), True, 'import numpy as np\n'), ((2447, 2477), 'numpy.where', 'np.where', (['(input_data > 0)', '(1)', '(0)'], {}), '(input_data > 0, 1, 0)\n', (2455, 2477), True, 'import numpy as np\n'), ((1144, 1162), 'numpy.exp', 'np.exp', (['input_data'], {}), '(input_data)\n', (1150, 1162), True, 'import numpy as np\n'), ((2556, 2578), 'numpy.squeeze', 'np.squeeze', (['input_data'], {}), '(input_data)\n', (2566, 2578), True, 'import numpy as np\n'), ((2650, 2676), 'numpy.zeros', 'np.zeros', (['(length, length)'], {}), '((length, length))\n', (2658, 2676), True, 'import numpy as np\n'), ((956, 974), 'numpy.abs', 'np.abs', (['input_data'], {}), '(input_data)\n', (962, 974), True, 'import numpy as np\n'), ((1096, 1114), 'numpy.max', 'np.max', (['input_data'], {}), '(input_data)\n', (1102, 1114), True, 'import numpy as np\n'), ((1199, 1221), 'numpy.sum', 'np.sum', (['exp_input_data'], {}), '(exp_input_data)\n', (1205, 1221), True, 'import numpy as np\n')] |
from typing import TypeVar, Union
from uuid import uuid4, UUID
from citrine._session import Session
from citrine._utils.functions import migrate_deprecated_argument
from citrine.informatics.workflows import PredictorEvaluationWorkflow, DesignWorkflow
from citrine.resources.predictor_evaluation_workflow import PredictorEvaluationWorkflowCollection
from citrine.resources.design_workflow import DesignWorkflowCollection
from tests.utils.fakes import FakeCollection, FakePredictorEvaluationWorkflow
WorkflowType = TypeVar('WorkflowType', bound='Workflow')
class FakeWorkflowCollection(FakeCollection[WorkflowType]):
def __init__(self, project_id, session: Session):
FakeCollection.__init__(self)
self.project_id = project_id
self.session = session
def register(self, workflow: WorkflowType) -> WorkflowType:
workflow = FakeCollection.register(self, workflow)
workflow.project_id = self.project_id
return workflow
def archive(self, uid: Union[UUID, str] = None, workflow_id: Union[UUID, str] = None):
# Search for workflow via UID to ensure exists
# If found, flip archived=True with no return
uid = migrate_deprecated_argument(uid, "uid", workflow_id, "workflow_id")
workflow = self.get(uid)
workflow.archived = True
self.update(workflow)
class FakeDesignWorkflowCollection(FakeWorkflowCollection[DesignWorkflow], DesignWorkflowCollection):
pass
class FakePredictorEvaluationWorkflowCollection(FakeWorkflowCollection[PredictorEvaluationWorkflow], PredictorEvaluationWorkflowCollection):
def create_default(self, *, predictor_id: UUID) -> PredictorEvaluationWorkflow:
pew = FakePredictorEvaluationWorkflow(
name=f"Default predictor evaluation workflow",
description="",
evaluators=[]
)
pew.project_id = self.project_id
pew.uid = uuid4()
pew._session = self.session
return pew | [
"tests.utils.fakes.FakeCollection.__init__",
"tests.utils.fakes.FakeCollection.register",
"tests.utils.fakes.FakePredictorEvaluationWorkflow",
"citrine._utils.functions.migrate_deprecated_argument",
"uuid.uuid4",
"typing.TypeVar"
] | [((516, 557), 'typing.TypeVar', 'TypeVar', (['"""WorkflowType"""'], {'bound': '"""Workflow"""'}), "('WorkflowType', bound='Workflow')\n", (523, 557), False, 'from typing import TypeVar, Union\n'), ((683, 712), 'tests.utils.fakes.FakeCollection.__init__', 'FakeCollection.__init__', (['self'], {}), '(self)\n', (706, 712), False, 'from tests.utils.fakes import FakeCollection, FakePredictorEvaluationWorkflow\n'), ((865, 904), 'tests.utils.fakes.FakeCollection.register', 'FakeCollection.register', (['self', 'workflow'], {}), '(self, workflow)\n', (888, 904), False, 'from tests.utils.fakes import FakeCollection, FakePredictorEvaluationWorkflow\n'), ((1190, 1257), 'citrine._utils.functions.migrate_deprecated_argument', 'migrate_deprecated_argument', (['uid', '"""uid"""', 'workflow_id', '"""workflow_id"""'], {}), "(uid, 'uid', workflow_id, 'workflow_id')\n", (1217, 1257), False, 'from citrine._utils.functions import migrate_deprecated_argument\n'), ((1709, 1823), 'tests.utils.fakes.FakePredictorEvaluationWorkflow', 'FakePredictorEvaluationWorkflow', ([], {'name': 'f"""Default predictor evaluation workflow"""', 'description': '""""""', 'evaluators': '[]'}), "(name=\n f'Default predictor evaluation workflow', description='', evaluators=[])\n", (1740, 1823), False, 'from tests.utils.fakes import FakeCollection, FakePredictorEvaluationWorkflow\n'), ((1924, 1931), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1929, 1931), False, 'from uuid import uuid4, UUID\n')] |
from abc import abstractmethod
from typing import Sequence, Tuple, Union
from .card import Card
import random
class Deck(Tuple[Card]):
"""Represents a Deck. Decks are immutable."""
def __new__(cls, *args: Union[Card, Tuple[int, Card]]):
"""
Creates a new deck from the given cards.
:param args:
A sequence of Card objects and/or 2-tuples where the first element is the count and the second the card.
:raises ValueError:
:raises TypeError:
"""
values = []
def is_a_tuple(c):
return isinstance(c, tuple)
def is_a_card(c):
return isinstance(c, Card)
for a in args:
if is_a_card(a):
values.append(a)
elif is_a_tuple(a):
if len(a) != 2:
raise ValueError("tuple must have a size of 2.")
count, card = a
count = int(count)
if not is_a_card(card):
raise ValueError("the card in a tuple must be an instance of Card")
if count < 0:
raise ValueError("count cannot be negative.")
values.extend([card] * count)
else:
raise ValueError("An item of cards was not of a Card type nor a tuple.")
return super(Deck, cls).__new__(cls, tuple(values))
@property
def empty(self) -> bool:
return len(self) == 0
def tally(deck: Deck) -> Sequence[Tuple[Card, int]]:
"""Tallies a Deck: counts each instance of a card. This is a very basic and useful operation.
:raises ValueError: deck is not a Deck.
:param deck: The deck to tally.
:return: A sequence of tuples, where the first member is the card and the second the count.
"""
if not isinstance(deck, Deck):
raise ValueError("Expected a Deck.")
counts = {}
for card in deck:
if card in counts:
counts[card] = counts[card] + 1
else:
counts[card] = 1
return tuple((card, count) for card, count in counts.items())
def opening_hand(deck: Deck, count: int = 7) -> Union[Tuple, Tuple[Card]]:
"""Draws an opening hand from the given deck.
:param deck:
the deck to draw from, if it is empty, opening_hand always returns the empty tuple.
:param count:
how many cards to draw, may not be negative. If zero, returns the empty tuple.
default is 7 per the rules of standard Magic.
        may not be greater than len(deck).
    :raise ValueError: count is negative, count is greater than len(deck), or either parameter is of the wrong type.
"""
if not isinstance(deck, Deck):
raise ValueError("Expected deck to be a Deck.")
if not isinstance(count, int) or count < 0:
raise ValueError("count must be an integer >= 0.")
if len(deck) < count:
raise ValueError("count cannot be less than the number of cards in the deck.")
if count == 0:
return ()
if len(deck) == 0:
return ()
return tuple(random.sample(deck, count))
| [
"random.sample"
] | [((3042, 3068), 'random.sample', 'random.sample', (['deck', 'count'], {}), '(deck, count)\n', (3055, 3068), False, 'import random\n')] |
import os
import requests
import gzip
import shutil
def extract(input_path, output_path):
if os.path.exists(output_path):
return False
with gzip.open(input_path, 'rb') as f_in:
with open(output_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
return True
def download(url, output_path):
if os.path.exists(output_path):
return False
r = requests.get(url, stream=True)
with open(output_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
f.write(chunk)
r.close()
return True
def get_paths(base_path):
if not os.path.exists(base_path):
os.makedirs(base_path)
cache_path = os.path.join(base_path, 'downloads')
if not os.path.exists(cache_path):
os.makedirs(cache_path)
data_path = os.path.join(base_path, 'ticks')
if not os.path.exists(data_path):
os.makedirs(data_path)
return (cache_path + '/{0}.csv.gz', data_path + '/{0}.csv')
def download_list(base_path, base_url, targets):
cache_format, data_format = get_paths(base_path)
for target in targets:
url = base_url.format(target)
cache_file = cache_format.format(target)
data_file = data_format.format(target)
print('Downloading {0}...'.format(target), end='', flush=True)
if download(url, cache_file):
print('done.')
else:
print('skipped.')
print('Extracting {0}...'.format(target), end='', flush=True)
if extract(cache_file, data_file):
print('done.')
else:
print('skipped.')
if __name__ == "__main__":
targets = ['bitfinexUSD', 'bitflyerJPY', 'bitstampUSD', 'coinbaseUSD', 'korbitKRW', 'zaifJPY']
base_url = 'https://api.bitcoincharts.com/v1/csv/{0}.csv.gz'
base_path = 'e:/datasets/'
download_list(base_path, base_url, targets) | [
"os.path.exists",
"shutil.copyfileobj",
"os.makedirs",
"gzip.open",
"os.path.join",
"requests.get"
] | [((98, 125), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (112, 125), False, 'import os\n'), ((341, 368), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (355, 368), False, 'import os\n'), ((399, 429), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (411, 429), False, 'import requests\n'), ((694, 730), 'os.path.join', 'os.path.join', (['base_path', '"""downloads"""'], {}), "(base_path, 'downloads')\n", (706, 730), False, 'import os\n'), ((818, 850), 'os.path.join', 'os.path.join', (['base_path', '"""ticks"""'], {}), "(base_path, 'ticks')\n", (830, 850), False, 'import os\n'), ((157, 184), 'gzip.open', 'gzip.open', (['input_path', '"""rb"""'], {}), "(input_path, 'rb')\n", (166, 184), False, 'import gzip\n'), ((619, 644), 'os.path.exists', 'os.path.exists', (['base_path'], {}), '(base_path)\n', (633, 644), False, 'import os\n'), ((654, 676), 'os.makedirs', 'os.makedirs', (['base_path'], {}), '(base_path)\n', (665, 676), False, 'import os\n'), ((742, 768), 'os.path.exists', 'os.path.exists', (['cache_path'], {}), '(cache_path)\n', (756, 768), False, 'import os\n'), ((778, 801), 'os.makedirs', 'os.makedirs', (['cache_path'], {}), '(cache_path)\n', (789, 801), False, 'import os\n'), ((862, 887), 'os.path.exists', 'os.path.exists', (['data_path'], {}), '(data_path)\n', (876, 887), False, 'import os\n'), ((897, 919), 'os.makedirs', 'os.makedirs', (['data_path'], {}), '(data_path)\n', (908, 919), False, 'import os\n'), ((253, 284), 'shutil.copyfileobj', 'shutil.copyfileobj', (['f_in', 'f_out'], {}), '(f_in, f_out)\n', (271, 284), False, 'import shutil\n')] |
from sustainlektor.cli import main
main()
| [
"sustainlektor.cli.main"
] | [((36, 42), 'sustainlektor.cli.main', 'main', ([], {}), '()\n', (40, 42), False, 'from sustainlektor.cli import main\n')] |
#!/usr/bin/env python3
import networkx as nx
import logging
from multiprocessing import Pool
from typing import List, Callable, Any
from ci.model.stage import Stage
StageVisitor = Callable[[str, Any], None]
class StageGraph:
graph: nx.DiGraph
stages: List[Stage]
def __init__(self, graph: nx.DiGraph) -> None:
self.graph = graph
    def get_graph(self) -> nx.DiGraph:
        return self.graph
def walk_groups(self, visitor: StageVisitor, **kwargs) -> None:
"""Calls the visitor for every node that has 0 outgoing edges.
Afterwards the edges are removed and the visitor is called for the next group."""
pool = Pool() # ThreadPool()
graph_copy = nx.DiGraph.copy(self.graph)
logging.debug("Nodes in graph: %d", len(graph_copy))
while len(graph_copy):
nodes = [n for n in graph_copy if graph_copy.out_degree(n) == 0]
logging.debug("Queueing %s", nodes)
args = [(graph_copy.nodes[node]["stage"], kwargs) for node in nodes]
pool.starmap(visitor, args)
for node in nodes:
logging.debug("Removing node %s", node)
graph_copy.remove_node(node)
pool.close()
pool.join()
| [
"logging.debug",
"multiprocessing.Pool",
"networkx.DiGraph.copy"
] | [((662, 668), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (666, 668), False, 'from multiprocessing import Pool\n'), ((706, 733), 'networkx.DiGraph.copy', 'nx.DiGraph.copy', (['self.graph'], {}), '(self.graph)\n', (721, 733), True, 'import networkx as nx\n'), ((917, 952), 'logging.debug', 'logging.debug', (['"""Queueing %s"""', 'nodes'], {}), "('Queueing %s', nodes)\n", (930, 952), False, 'import logging\n'), ((1123, 1162), 'logging.debug', 'logging.debug', (['"""Removing node %s"""', 'node'], {}), "('Removing node %s', node)\n", (1136, 1162), False, 'import logging\n')] |
#!/usr/bin/env python3
'''
$ subcommands.py # returncode=2 stderr=True
usage: subcommands.py [-h] {pairs,kwargs,auto}
subcommands.py: error: the following arguments are required: subcommand
$ subcommands.py --help
> usage: subcommands.py {pairs,kwargs,auto}
>
> positional arguments:
> {pairs,kwargs,auto}
$ subcommands.py auto --help
> usage: subcommands.py auto {one,two,on1,tw2}
>
> positional arguments:
> {one,two,on1,tw2}
$ subcommands.py pairs --help
> usage: subcommands.py pairs {one,two,on1,tw2}
>
> positional arguments:
> {one,two,on1,tw2}
$ subcommands.py kwargs --help # glob=True
> usage: subcommands.py kwargs {*}
>
> positional arguments:
> {*}
$ subcommands.py auto one 123 456 789
subcommand1 arg='123' remaining=('456', '789') flag1=False
$ subcommands.py auto two 345 678 9
subcommand2 arg='345' remaining=('678', '9') flag2=False
$ subcommands.py <TAB>
pairs\x0bkwargs\x0bauto
$ subcommands.py au<TAB>
auto
$ subcommands.py auto <TAB>
one\x0btwo\x0bon1\x0btw2
$ subcommands.py auto o<TAB>
one\x0bon1
'''
from hashbang import command, subcommands, Argument, NoMatchingDelegate
import sys
@command
def subcommand1(arg, *remaining, flag1=False):
print(
'subcommand1 arg={} remaining={} flag1={}'
.format(*map(repr, (arg, remaining, flag1))))
@command
def subcommand2(arg, *remaining, flag2=False):
print(
'subcommand2 arg={} remaining={} flag2={}'
.format(*map(repr, (arg, remaining, flag2))))
# Using pairs preserves the insertion order
pairs = subcommands(
('one', subcommand1),
('two', subcommand2),
('on1', subcommand1),
('tw2', subcommand2))
# Using keyword arguments preserves insertion order in Python 3.6 or above,
# and sorts by key on lower versions
kwargs = subcommands(
one=subcommand1,
two=subcommand2,
on1=subcommand1,
tw2=subcommand2)
@command.delegator
def main(
subcommand: Argument(choices=('pairs', 'kwargs', 'auto')),
*_REMAINDER_):
if subcommand == 'auto':
if sys.version_info >= (3, 6):
return kwargs.execute(_REMAINDER_)
else:
return pairs.execute(_REMAINDER_)
elif subcommand == 'pairs':
return pairs.execute(_REMAINDER_)
elif subcommand == 'kwargs':
return kwargs.execute(_REMAINDER_)
else:
raise NoMatchingDelegate()
if __name__ == '__main__':
main.execute()
| [
"hashbang.subcommands",
"hashbang.Argument",
"hashbang.NoMatchingDelegate"
] | [((1550, 1654), 'hashbang.subcommands', 'subcommands', (["('one', subcommand1)", "('two', subcommand2)", "('on1', subcommand1)", "('tw2', subcommand2)"], {}), "(('one', subcommand1), ('two', subcommand2), ('on1', subcommand1\n ), ('tw2', subcommand2))\n", (1561, 1654), False, 'from hashbang import command, subcommands, Argument, NoMatchingDelegate\n'), ((1805, 1884), 'hashbang.subcommands', 'subcommands', ([], {'one': 'subcommand1', 'two': 'subcommand2', 'on1': 'subcommand1', 'tw2': 'subcommand2'}), '(one=subcommand1, two=subcommand2, on1=subcommand1, tw2=subcommand2)\n', (1816, 1884), False, 'from hashbang import command, subcommands, Argument, NoMatchingDelegate\n'), ((1969, 2014), 'hashbang.Argument', 'Argument', ([], {'choices': "('pairs', 'kwargs', 'auto')"}), "(choices=('pairs', 'kwargs', 'auto'))\n", (1977, 2014), False, 'from hashbang import command, subcommands, Argument, NoMatchingDelegate\n'), ((2388, 2408), 'hashbang.NoMatchingDelegate', 'NoMatchingDelegate', ([], {}), '()\n', (2406, 2408), False, 'from hashbang import command, subcommands, Argument, NoMatchingDelegate\n')] |
from unittest import TestCase, mock
from unittest.mock import MagicMock
from ornitho import Field
from ornitho.api_exception import APIException
class TestField(TestCase):
def setUp(self):
self.field_json = {
"id": "1",
"group": "OBS",
"name": "RESTING_HABITAT",
"text": "(Rast)Habitat",
"default": "0",
"mandatory": "0",
"empty_choice": "1",
}
self.field = Field.create_from_ornitho_json(self.field_json)
def test_get(self):
Field.list_all = MagicMock(
return_value=[
Field.create_from_ornitho_json(
{
"id": "1",
"group": "OBS",
"name": "RESTING_HABITAT",
"text": "(Rast)Habitat",
"default": "0",
"mandatory": "0",
"empty_choice": "1",
}
),
Field.create_from_ornitho_json(
{
"id": "2",
"group": "OBS",
"name": "ACCURACY_OF_LOCATION",
"text": "Genauigkeit der Ortsangabe",
"default": "0",
"mandatory": "0",
"empty_choice": "1",
}
),
]
)
field = Field.get(1)
self.assertEqual(1, field.id_)
# Test Exception
self.assertRaises(
APIException,
lambda: Field.get(4),
)
def test_refresh(self):
self.assertRaises(
NotImplementedError,
lambda: self.field.refresh(),
)
def test_group(self):
self.assertEqual(self.field_json["group"], self.field.group)
def test_name(self):
self.assertEqual(self.field_json["name"], self.field.name)
def test_text(self):
self.assertEqual(self.field_json["text"], self.field.text)
def test_default(self):
self.assertEqual(int(self.field_json["default"]), self.field.default)
def test_mandatory(self):
self.assertEqual(
False if self.field_json["mandatory"] == "0" else True, self.field.mandatory
)
def test_empty_choice(self):
self.assertEqual(
False if self.field_json["empty_choice"] == "0" else True,
self.field.empty_choice,
)
@mock.patch("ornitho.model.field.APIRequester")
@mock.patch("ornitho.model.field.FieldOption")
def test_options(self, mock_field_option, mock_requester):
class MockRequesterClass:
def request(self, method, url):
return ["option1", "option2"], "pk"
def enter_requester(requester):
return MockRequesterClass()
mock_requester.return_value.__enter__ = enter_requester
mock_field_option.create_from_ornitho_json.return_value = ["Created!"]
self.assertEqual(
[
mock_field_option.create_from_ornitho_json.return_value,
mock_field_option.create_from_ornitho_json.return_value,
],
self.field.options,
)
| [
"ornitho.Field.get",
"ornitho.Field.create_from_ornitho_json",
"unittest.mock.patch"
] | [((2538, 2584), 'unittest.mock.patch', 'mock.patch', (['"""ornitho.model.field.APIRequester"""'], {}), "('ornitho.model.field.APIRequester')\n", (2548, 2584), False, 'from unittest import TestCase, mock\n'), ((2590, 2635), 'unittest.mock.patch', 'mock.patch', (['"""ornitho.model.field.FieldOption"""'], {}), "('ornitho.model.field.FieldOption')\n", (2600, 2635), False, 'from unittest import TestCase, mock\n'), ((473, 520), 'ornitho.Field.create_from_ornitho_json', 'Field.create_from_ornitho_json', (['self.field_json'], {}), '(self.field_json)\n', (503, 520), False, 'from ornitho import Field\n'), ((1493, 1505), 'ornitho.Field.get', 'Field.get', (['(1)'], {}), '(1)\n', (1502, 1505), False, 'from ornitho import Field\n'), ((1644, 1656), 'ornitho.Field.get', 'Field.get', (['(4)'], {}), '(4)\n', (1653, 1656), False, 'from ornitho import Field\n'), ((625, 799), 'ornitho.Field.create_from_ornitho_json', 'Field.create_from_ornitho_json', (["{'id': '1', 'group': 'OBS', 'name': 'RESTING_HABITAT', 'text':\n '(Rast)Habitat', 'default': '0', 'mandatory': '0', 'empty_choice': '1'}"], {}), "({'id': '1', 'group': 'OBS', 'name':\n 'RESTING_HABITAT', 'text': '(Rast)Habitat', 'default': '0', 'mandatory':\n '0', 'empty_choice': '1'})\n", (655, 799), False, 'from ornitho import Field\n'), ((1038, 1230), 'ornitho.Field.create_from_ornitho_json', 'Field.create_from_ornitho_json', (["{'id': '2', 'group': 'OBS', 'name': 'ACCURACY_OF_LOCATION', 'text':\n 'Genauigkeit der Ortsangabe', 'default': '0', 'mandatory': '0',\n 'empty_choice': '1'}"], {}), "({'id': '2', 'group': 'OBS', 'name':\n 'ACCURACY_OF_LOCATION', 'text': 'Genauigkeit der Ortsangabe', 'default':\n '0', 'mandatory': '0', 'empty_choice': '1'})\n", (1068, 1230), False, 'from ornitho import Field\n')] |
from flask import Flask, render_template, request
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
import numpy as np
app = Flask(__name__)
dic = {0 : 'No DR', 1 : 'Mild', 2 : 'Moderate', 3 : 'Severe', 4 : 'Proliferative DR'}
model = load_model('model.h5')
model.make_predict_function()
def predict_label(img_path):
i = image.load_img(img_path, target_size=(700,700))
i = image.img_to_array(i)/255.0
i = i.reshape(1, 700, 700, 3)
p = model.predict(i)
p = np.argmax(p, axis=1)
return dic[p[0]]
# routes
@app.route("/", methods=['GET', 'POST'])
def main():
return render_template("index.html")
@app.route("/about")
def about_page():
return "DR Classification App"
@app.route("/submit", methods = ['GET', 'POST'])
def get_output():
if request.method == 'POST':
img = request.files['my_image']
img_path = "static/" + img.filename
img.save(img_path)
p = predict_label(img_path)
return render_template("index.html", prediction = p, img_path = img_path)
if __name__ =='__main__':
#app.debug = True
app.run(debug = True) | [
"tensorflow.keras.preprocessing.image.load_img",
"flask.render_template",
"flask.Flask",
"numpy.argmax",
"tensorflow.keras.models.load_model",
"tensorflow.keras.preprocessing.image.img_to_array"
] | [((177, 192), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (182, 192), False, 'from flask import Flask, render_template, request\n'), ((293, 315), 'tensorflow.keras.models.load_model', 'load_model', (['"""model.h5"""'], {}), "('model.h5')\n", (303, 315), False, 'from tensorflow.keras.models import load_model\n'), ((390, 438), 'tensorflow.keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(700, 700)'}), '(img_path, target_size=(700, 700))\n', (404, 438), False, 'from tensorflow.keras.preprocessing import image\n'), ((545, 565), 'numpy.argmax', 'np.argmax', (['p'], {'axis': '(1)'}), '(p, axis=1)\n', (554, 565), True, 'import numpy as np\n'), ((668, 697), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (683, 697), False, 'from flask import Flask, render_template, request\n'), ((1019, 1081), 'flask.render_template', 'render_template', (['"""index.html"""'], {'prediction': 'p', 'img_path': 'img_path'}), "('index.html', prediction=p, img_path=img_path)\n", (1034, 1081), False, 'from flask import Flask, render_template, request\n'), ((447, 468), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['i'], {}), '(i)\n', (465, 468), False, 'from tensorflow.keras.preprocessing import image\n')] |
import media
from fresh_tomatoes import open_movies_page
# Let's define an empty list to keep the movies
movies = []
# Append the movies one by one
movies.append(media.Movie(title="Wonder Park (2018)",
poster_image_url="https://goo.gl/uXzuFk",
trailer_youtube_url="https://www.youtube.com/watch?v=vYm7mYd0SgE")) # NOQA
movies.append(media.Movie(title="Bumblebee (2018)",
poster_image_url="https://goo.gl/H4mMG5",
trailer_youtube_url="https://www.youtube.com/watch?v=fAIX12F6958")) # NOQA
movies.append(media.Movie(title="Mission: Impossible - Fallout (2018)",
poster_image_url="https://goo.gl/jW8WSw",
trailer_youtube_url="https://www.youtube.com/watch?v=wb49-oV0F78")) # NOQA
movies.append(media.Movie(title="Ralph Breaks the Internet: "
"Wreck-It Ralph 2 (2018)",
poster_image_url="https://goo.gl/DLWJEs",
trailer_youtube_url="https://www.youtube.com/watch?v=lX71_Jcm4po")) # NOQA
# Render a web page and open it
open_movies_page(movies)
| [
"fresh_tomatoes.open_movies_page",
"media.Movie"
] | [((1066, 1090), 'fresh_tomatoes.open_movies_page', 'open_movies_page', (['movies'], {}), '(movies)\n', (1082, 1090), False, 'from fresh_tomatoes import open_movies_page\n'), ((164, 322), 'media.Movie', 'media.Movie', ([], {'title': '"""Wonder Park (2018)"""', 'poster_image_url': '"""https://goo.gl/uXzuFk"""', 'trailer_youtube_url': '"""https://www.youtube.com/watch?v=vYm7mYd0SgE"""'}), "(title='Wonder Park (2018)', poster_image_url=\n 'https://goo.gl/uXzuFk', trailer_youtube_url=\n 'https://www.youtube.com/watch?v=vYm7mYd0SgE')\n", (175, 322), False, 'import media\n'), ((364, 520), 'media.Movie', 'media.Movie', ([], {'title': '"""Bumblebee (2018)"""', 'poster_image_url': '"""https://goo.gl/H4mMG5"""', 'trailer_youtube_url': '"""https://www.youtube.com/watch?v=fAIX12F6958"""'}), "(title='Bumblebee (2018)', poster_image_url=\n 'https://goo.gl/H4mMG5', trailer_youtube_url=\n 'https://www.youtube.com/watch?v=fAIX12F6958')\n", (375, 520), False, 'import media\n'), ((562, 738), 'media.Movie', 'media.Movie', ([], {'title': '"""Mission: Impossible - Fallout (2018)"""', 'poster_image_url': '"""https://goo.gl/jW8WSw"""', 'trailer_youtube_url': '"""https://www.youtube.com/watch?v=wb49-oV0F78"""'}), "(title='Mission: Impossible - Fallout (2018)', poster_image_url=\n 'https://goo.gl/jW8WSw', trailer_youtube_url=\n 'https://www.youtube.com/watch?v=wb49-oV0F78')\n", (573, 738), False, 'import media\n'), ((780, 969), 'media.Movie', 'media.Movie', ([], {'title': '"""Ralph Breaks the Internet: Wreck-It Ralph 2 (2018)"""', 'poster_image_url': '"""https://goo.gl/DLWJEs"""', 'trailer_youtube_url': '"""https://www.youtube.com/watch?v=lX71_Jcm4po"""'}), "(title='Ralph Breaks the Internet: Wreck-It Ralph 2 (2018)',\n poster_image_url='https://goo.gl/DLWJEs', trailer_youtube_url=\n 'https://www.youtube.com/watch?v=lX71_Jcm4po')\n", (791, 969), False, 'import media\n')] |
import click
import pytest
from json import loads
from tests.environment import DNA_CENTER_VERSION
from tests.models.schema_validator import json_schema_validate
from dnacentersdk import mydict_data_factory
from tests.environment import (
DNA_CENTER_USERNAME, DNA_CENTER_PASSWORD,
DNA_CENTER_ENCODED_AUTH
)
pytestmark = pytest.mark.skipif(DNA_CENTER_VERSION != '1.2.10', reason='version does not match')
# @pytest.mark.authentication
# def test_authentication_api(runner, cli, auth_options):
# result = runner.invoke(cli, ['v1-2-10', *auth_options, 'authentication', 'authentication-api', '''--username=DNA_CENTER_USERNAME''', '''--password=<PASSWORD>''', '''--encoded_auth=DNA_CENTER_ENCODED_AUTH'''])
# assert not result.exception
# if result.output.strip():
# obj = loads(result.output)
# assert json_schema_validate('jsd_ac8ae94c4e69a09d_v1_2_10').validate(obj) is None
| [
"pytest.mark.skipif"
] | [((330, 418), 'pytest.mark.skipif', 'pytest.mark.skipif', (["(DNA_CENTER_VERSION != '1.2.10')"], {'reason': '"""version does not match"""'}), "(DNA_CENTER_VERSION != '1.2.10', reason=\n 'version does not match')\n", (348, 418), False, 'import pytest\n')] |
# Copyright 2016-present Facebook. All Rights Reserved.
#
# protocol: logic for a server providing fastannotate support
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import contextlib
import os
from mercurial.i18n import _
from mercurial.pycompat import open
from mercurial import (
error,
extensions,
hg,
pycompat,
util,
wireprotov1peer,
wireprotov1server,
)
from . import context
# common
def _getmaster(ui):
"""get the mainbranch, and enforce it is set"""
master = ui.config(b'fastannotate', b'mainbranch')
if not master:
raise error.Abort(
_(
b'fastannotate.mainbranch is required '
b'for both the client and the server'
)
)
return master
# server-side
def _capabilities(orig, repo, proto):
result = orig(repo, proto)
result.append(b'getannotate')
return result
def _getannotate(repo, proto, path, lastnode):
# output:
# FILE := vfspath + '\0' + str(size) + '\0' + content
# OUTPUT := '' | FILE + OUTPUT
result = b''
buildondemand = repo.ui.configbool(
b'fastannotate', b'serverbuildondemand', True
)
with context.annotatecontext(repo, path) as actx:
if buildondemand:
# update before responding to the client
master = _getmaster(repo.ui)
try:
if not actx.isuptodate(master):
actx.annotate(master, master)
except Exception:
                # non-fast-forward move or corrupted. rebuild automatically.
actx.rebuild()
try:
actx.annotate(master, master)
except Exception:
actx.rebuild() # delete files
finally:
# although the "with" context will also do a close/flush, we
# need to do it early so we can send the correct respond to
# client.
actx.close()
# send back the full content of revmap and linelog, in the future we
# may want to do some rsync-like fancy updating.
# the lastnode check is not necessary if the client and the server
# agree where the main branch is.
if actx.lastnode != lastnode:
for p in [actx.revmappath, actx.linelogpath]:
if not os.path.exists(p):
continue
with open(p, b'rb') as f:
content = f.read()
vfsbaselen = len(repo.vfs.base + b'/')
relpath = p[vfsbaselen:]
result += b'%s\0%d\0%s' % (relpath, len(content), content)
return result
def _registerwireprotocommand():
if b'getannotate' in wireprotov1server.commands:
return
wireprotov1server.wireprotocommand(b'getannotate', b'path lastnode')(
_getannotate
)
def serveruisetup(ui):
_registerwireprotocommand()
extensions.wrapfunction(wireprotov1server, b'_capabilities', _capabilities)
# client-side
def _parseresponse(payload):
result = {}
i = 0
l = len(payload) - 1
state = 0 # 0: vfspath, 1: size
vfspath = size = b''
while i < l:
ch = payload[i : i + 1]
if ch == b'\0':
if state == 1:
result[vfspath] = payload[i + 1 : i + 1 + int(size)]
i += int(size)
state = 0
vfspath = size = b''
elif state == 0:
state = 1
else:
if state == 1:
size += ch
elif state == 0:
vfspath += ch
i += 1
return result
def peersetup(ui, peer):
class fastannotatepeer(peer.__class__):
@wireprotov1peer.batchable
def getannotate(self, path, lastnode=None):
if not self.capable(b'getannotate'):
ui.warn(_(b'remote peer cannot provide annotate cache\n'))
yield None, None
else:
args = {b'path': path, b'lastnode': lastnode or b''}
f = wireprotov1peer.future()
yield args, f
yield _parseresponse(f.value)
peer.__class__ = fastannotatepeer
@contextlib.contextmanager
def annotatepeer(repo):
ui = repo.ui
remotepath = ui.expandpath(
ui.config(b'fastannotate', b'remotepath', b'default')
)
peer = hg.peer(ui, {}, remotepath)
try:
yield peer
finally:
peer.close()
def clientfetch(repo, paths, lastnodemap=None, peer=None):
"""download annotate cache from the server for paths"""
if not paths:
return
if peer is None:
with annotatepeer(repo) as peer:
return clientfetch(repo, paths, lastnodemap, peer)
if lastnodemap is None:
lastnodemap = {}
ui = repo.ui
results = []
with peer.commandexecutor() as batcher:
ui.debug(b'fastannotate: requesting %d files\n' % len(paths))
for p in paths:
results.append(
batcher.callcommand(
b'getannotate',
{b'path': p, b'lastnode': lastnodemap.get(p)},
)
)
for result in results:
r = result.result()
# TODO: pconvert these paths on the server?
r = {util.pconvert(p): v for p, v in pycompat.iteritems(r)}
for path in sorted(r):
# ignore malicious paths
if not path.startswith(b'fastannotate/') or b'/../' in (
path + b'/'
):
ui.debug(
b'fastannotate: ignored malicious path %s\n' % path
)
continue
content = r[path]
if ui.debugflag:
ui.debug(
b'fastannotate: writing %d bytes to %s\n'
% (len(content), path)
)
repo.vfs.makedirs(os.path.dirname(path))
with repo.vfs(path, b'wb') as f:
f.write(content)
def _filterfetchpaths(repo, paths):
"""return a subset of paths whose history is long and need to fetch linelog
from the server. works with remotefilelog and non-remotefilelog repos.
"""
threshold = repo.ui.configint(b'fastannotate', b'clientfetchthreshold', 10)
if threshold <= 0:
return paths
result = []
for path in paths:
try:
if len(repo.file(path)) >= threshold:
result.append(path)
except Exception: # file not found etc.
result.append(path)
return result
def localreposetup(ui, repo):
class fastannotaterepo(repo.__class__):
def prefetchfastannotate(self, paths, peer=None):
master = _getmaster(self.ui)
needupdatepaths = []
lastnodemap = {}
try:
for path in _filterfetchpaths(self, paths):
with context.annotatecontext(self, path) as actx:
if not actx.isuptodate(master, strict=False):
needupdatepaths.append(path)
lastnodemap[path] = actx.lastnode
if needupdatepaths:
clientfetch(self, needupdatepaths, lastnodemap, peer)
except Exception as ex:
# could be directory not writable or so, not fatal
self.ui.debug(b'fastannotate: prefetch failed: %r\n' % ex)
repo.__class__ = fastannotaterepo
def clientreposetup(ui, repo):
_registerwireprotocommand()
if repo.local():
localreposetup(ui, repo)
# TODO: this mutates global state, but only if at least one repo
# has the extension enabled. This is probably bad for hgweb.
if peersetup not in hg.wirepeersetupfuncs:
hg.wirepeersetupfuncs.append(peersetup)
| [
"os.path.exists",
"mercurial.extensions.wrapfunction",
"mercurial.wireprotov1server.wireprotocommand",
"mercurial.i18n._",
"mercurial.hg.wirepeersetupfuncs.append",
"mercurial.pycompat.open",
"os.path.dirname",
"mercurial.wireprotov1peer.future",
"mercurial.hg.peer",
"mercurial.pycompat.iteritems",
"mercurial.util.pconvert"
] | [((3073, 3148), 'mercurial.extensions.wrapfunction', 'extensions.wrapfunction', (['wireprotov1server', "b'_capabilities'", '_capabilities'], {}), "(wireprotov1server, b'_capabilities', _capabilities)\n", (3096, 3148), False, 'from mercurial import error, extensions, hg, pycompat, util, wireprotov1peer, wireprotov1server\n'), ((4531, 4558), 'mercurial.hg.peer', 'hg.peer', (['ui', '{}', 'remotepath'], {}), '(ui, {}, remotepath)\n', (4538, 4558), False, 'from mercurial import error, extensions, hg, pycompat, util, wireprotov1peer, wireprotov1server\n'), ((2915, 2983), 'mercurial.wireprotov1server.wireprotocommand', 'wireprotov1server.wireprotocommand', (["b'getannotate'", "b'path lastnode'"], {}), "(b'getannotate', b'path lastnode')\n", (2949, 2983), False, 'from mercurial import error, extensions, hg, pycompat, util, wireprotov1peer, wireprotov1server\n'), ((8025, 8064), 'mercurial.hg.wirepeersetupfuncs.append', 'hg.wirepeersetupfuncs.append', (['peersetup'], {}), '(peersetup)\n', (8053, 8064), False, 'from mercurial import error, extensions, hg, pycompat, util, wireprotov1peer, wireprotov1server\n'), ((739, 815), 'mercurial.i18n._', '_', (["b'fastannotate.mainbranch is required for both the client and the server'"], {}), "(b'fastannotate.mainbranch is required for both the client and the server')\n", (740, 815), False, 'from mercurial.i18n import _\n'), ((4209, 4233), 'mercurial.wireprotov1peer.future', 'wireprotov1peer.future', ([], {}), '()\n', (4231, 4233), False, 'from mercurial import error, extensions, hg, pycompat, util, wireprotov1peer, wireprotov1server\n'), ((5466, 5482), 'mercurial.util.pconvert', 'util.pconvert', (['p'], {}), '(p)\n', (5479, 5482), False, 'from mercurial import error, extensions, hg, pycompat, util, wireprotov1peer, wireprotov1server\n'), ((2490, 2507), 'os.path.exists', 'os.path.exists', (['p'], {}), '(p)\n', (2504, 2507), False, 'import os\n'), ((2559, 2573), 'mercurial.pycompat.open', 'open', (['p', "b'rb'"], {}), "(p, b'rb')\n", (2563, 2573), False, 'from mercurial.pycompat import open\n'), ((4018, 4067), 'mercurial.i18n._', '_', (["b'remote peer cannot provide annotate cache\\n'"], {}), "(b'remote peer cannot provide annotate cache\\n')\n", (4019, 4067), False, 'from mercurial.i18n import _\n'), ((5498, 5519), 'mercurial.pycompat.iteritems', 'pycompat.iteritems', (['r'], {}), '(r)\n', (5516, 5519), False, 'from mercurial import error, extensions, hg, pycompat, util, wireprotov1peer, wireprotov1server\n'), ((6144, 6165), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (6159, 6165), False, 'import os\n')] |
import SeeThru_Feeds.Core.Exceptions.Config as ConfigExceptions
class Meta:
Script_Name: str
Script_Output_Path: str
Script_Object_Path: str
def __init__(self, name: str, output_path: str, object_path: str):
self.Script_Name = name
self.Script_Output_Path = output_path
self.Script_Object_Path = object_path
@staticmethod
def new(name: str) -> "Meta":
"""
Creates a new script meta, with attributes derived from the name
Args:
name (str): The name of the script
Returns:
ScriptMeta: The new script meta
"""
output_path = f"Outputs/{name}"
object_path = f"Scripts.{name}@{name}"
return Meta(name, output_path, object_path)
@staticmethod
def load(data: dict, script_name: str) -> "Meta":
"""
Parses the given data into a script meta
Args:
data: The data to parse
            script_name: The name of the script to which this belongs
Returns:
Meta: The new script meta
"""
if "Script_Name" not in data:
raise ConfigExceptions.ScriptMetaException("A script meta has no name", script_name)
if "Script_Output_Path" not in data:
raise ConfigExceptions.ScriptMetaException("A script meta has no output path", script_name)
if "Script_Object_Path" not in data:
raise ConfigExceptions.ScriptMetaException("A script meta has no script module", script_name)
return Meta(
data["Script_Name"],
data["Script_Output_Path"],
data["Script_Object_Path"]
)
def dump(self) -> dict:
"""
Dumps the meta into a dictionary
Returns:
dict: The dumped data
"""
return {
"Script_Name": self.Script_Name,
"Script_Output_Path": self.Script_Output_Path,
"Script_Object_Path": self.Script_Object_Path
}
| [
"SeeThru_Feeds.Core.Exceptions.Config.ScriptMetaException"
] | [((1143, 1221), 'SeeThru_Feeds.Core.Exceptions.Config.ScriptMetaException', 'ConfigExceptions.ScriptMetaException', (['"""A script meta has no name"""', 'script_name'], {}), "('A script meta has no name', script_name)\n", (1179, 1221), True, 'import SeeThru_Feeds.Core.Exceptions.Config as ConfigExceptions\n'), ((1285, 1374), 'SeeThru_Feeds.Core.Exceptions.Config.ScriptMetaException', 'ConfigExceptions.ScriptMetaException', (['"""A script meta has no output path"""', 'script_name'], {}), "('A script meta has no output path',\n script_name)\n", (1321, 1374), True, 'import SeeThru_Feeds.Core.Exceptions.Config as ConfigExceptions\n'), ((1434, 1525), 'SeeThru_Feeds.Core.Exceptions.Config.ScriptMetaException', 'ConfigExceptions.ScriptMetaException', (['"""A script meta has no script module"""', 'script_name'], {}), "('A script meta has no script module',\n script_name)\n", (1470, 1525), True, 'import SeeThru_Feeds.Core.Exceptions.Config as ConfigExceptions\n')] |
"""
Provides a HouseCanaryObject class which encapsulates
an object and its associated data.
Currently, only the Property subclass is implemented.
"""
from builtins import str
from builtins import next
from builtins import object
import housecanary.constants as constants
def _create_component_results(json_data, result_key):
""" Returns a list of ComponentResult from the json_data"""
component_results = []
for key, value in list(json_data.items()):
if key not in [result_key, "meta"]:
component_result = ComponentResult(
key,
value["result"],
value["api_code"],
value["api_code_description"]
)
component_results.append(component_result)
return component_results
class HouseCanaryObject(object):
"""Base class for returned API objects."""
def __init__(self):
"""
Args:
data - Json data returned from the API for this object.
api_code - The HouseCanary business logic error code.
api_code_description - The HouseCanary business logic error description.
"""
self.component_results = []
def has_error(self):
"""Returns whether there was a business logic error when fetching data
for any components for this property.
Returns:
boolean
"""
return next(
(True for cr in self.component_results
if cr.has_error()),
False
)
def get_errors(self):
"""If there were any business errors fetching data for this property,
returns the error messages.
Returns:
string - the error message, or None if there was no error.
"""
return [{cr.component_name: cr.get_error()}
for cr in self.component_results if cr.has_error()]
def __str__(self):
return "HouseCanaryObject"
class Property(HouseCanaryObject):
"""A single address"""
def __init__(self, address=None, zipcode=None):
"""
Args:
address (required) -- Building number, street name and unit number.
zipcode (required) -- Zipcode that matches the address.
data (optional) -- The data returned from the API for this property.
api_code (optional) -- The HouseCanary business logic
error code reflecting any error with this property.
api_code_description (optional) -- The HouseCanary business logic
error description.
"""
super(Property, self).__init__()
self.address = str(address)
self.zipcode = str(zipcode)
self.block_id = None
self.zipcode_plus4 = None
self.address_full = None
self.city = None
self.county_fips = None
self.geo_precision = None
self.lat = None
self.lng = None
self.slug = None
self.state = None
self.unit = None
self.meta = None
@classmethod
def create_from_json(cls, json_data):
"""Deserialize property json data into a Property object
Args:
json_data (dict): The json data for this property
Returns:
Property object
"""
prop = Property()
address_info = json_data["address_info"]
prop.address = address_info["address"]
prop.block_id = address_info["block_id"]
prop.zipcode = address_info["zipcode"]
prop.zipcode_plus4 = address_info["zipcode_plus4"]
prop.address_full = address_info["address_full"]
prop.city = address_info["city"]
prop.county_fips = address_info["county_fips"]
prop.geo_precision = address_info["geo_precision"]
prop.lat = address_info["lat"]
prop.lng = address_info["lng"]
prop.slug = address_info["slug"]
prop.state = address_info["state"]
prop.unit = address_info["unit"]
prop.meta = None
if "meta" in json_data:
prop.meta = json_data["meta"]
prop.component_results = _create_component_results(json_data, "address_info")
return prop
def __str__(self):
return self.address or self.meta or "PropertyObject"
class Block(HouseCanaryObject):
"""A single block"""
def __init__(self, block_id=None):
"""
Args:
block_id (required) -- Block ID.
data (optional) -- The data returned from the API for this block.
api_code (optional) -- The HouseCanary business logic
error code reflecting any error with this block.
api_code_description (optional) -- The HouseCanary business logic error description.
"""
super(Block, self).__init__()
self.block_id = str(block_id)
self.num_bins = None
self.property_type = None
self.meta = None
@classmethod
def create_from_json(cls, json_data):
"""Deserialize block json data into a Block object
Args:
json_data (dict): The json data for this block
Returns:
Block object
"""
block = Block()
block_info = json_data["block_info"]
block.block_id = block_info["block_id"]
block.num_bins = block_info["num_bins"] if "num_bins" in block_info else None
block.property_type = block_info["property_type"] if "property_type" in block_info else None
block.meta = json_data["meta"] if "meta" in json_data else None
block.component_results = _create_component_results(json_data, "block_info")
return block
def __str__(self):
return self.block_id or self.meta or "BlockObject"
class ZipCode(HouseCanaryObject):
"""A single zipcode"""
def __init__(self, zipcode=None):
"""
Args:
zipcode (required) -- Zipcode.
data (optional) -- The data returned from the API for this zipcode.
api_code (optional) -- The HouseCanary business logic
error code reflecting any error with this zipcode.
api_code_description (optional) -- The HouseCanary business logic error description.
"""
super(ZipCode, self).__init__()
self.zipcode = str(zipcode)
self.meta = None
@classmethod
def create_from_json(cls, json_data):
"""Deserialize zipcode json data into a ZipCode object
Args:
json_data (dict): The json data for this zipcode
Returns:
Zip object
"""
zipcode = ZipCode()
zipcode.zipcode = json_data["zipcode_info"]["zipcode"]
zipcode.meta = json_data["meta"] if "meta" in json_data else None
zipcode.component_results = _create_component_results(json_data, "zipcode_info")
return zipcode
def __str__(self):
return self.zipcode or self.meta or "ZipCodeObject"
class Msa(HouseCanaryObject):
"""A single MSA"""
def __init__(self, msa=None):
"""
Args:
msa (required) -- MSA.
data (optional) -- The data returned from the API for this MSA.
api_code (optional) -- The HouseCanary business logic
error code reflecting any error with this MSA.
api_code_description (optional) -- The HouseCanary business logic error description.
"""
super(Msa, self).__init__()
self.msa = str(msa)
self.meta = None
@classmethod
def create_from_json(cls, json_data):
"""Deserialize msa json data into a Msa object
Args:
json_data (dict): The json data for this msa
Returns:
Msa object
"""
msa = Msa()
msa.msa = json_data["msa_info"]["msa"]
msa.meta = json_data["meta"] if "meta" in json_data else None
msa.component_results = _create_component_results(json_data, "msa_info")
return msa
def __str__(self):
return self.msa or self.meta or "MsaObject"
class ComponentResult(object):
"""The results of a single component"""
def __init__(self, component_name, json_data, api_code, api_code_description):
"""
Args:
component_name - string name of the component.
json_data - Json data returned from the API for this object.
api_code - The HouseCanary business logic error code.
api_code_description - The HouseCanary business logic error description.
"""
self.component_name = component_name
self.json_data = json_data
self.api_code = api_code
self.api_code_description = api_code_description
def has_error(self):
"""Returns whether this component had a business logic error"""
return self.api_code > constants.BIZ_CODE_OK
def get_error(self):
"""Gets the error of this component, if any"""
return self.api_code_description
| [
"builtins.str"
] | [((2646, 2658), 'builtins.str', 'str', (['address'], {}), '(address)\n', (2649, 2658), False, 'from builtins import str\n'), ((2682, 2694), 'builtins.str', 'str', (['zipcode'], {}), '(zipcode)\n', (2685, 2694), False, 'from builtins import str\n'), ((4828, 4841), 'builtins.str', 'str', (['block_id'], {}), '(block_id)\n', (4831, 4841), False, 'from builtins import str\n'), ((6303, 6315), 'builtins.str', 'str', (['zipcode'], {}), '(zipcode)\n', (6306, 6315), False, 'from builtins import str\n'), ((7478, 7486), 'builtins.str', 'str', (['msa'], {}), '(msa)\n', (7481, 7486), False, 'from builtins import str\n')] |
# Author: <NAME>
# Email: <EMAIL>
from ImportParamFromFile import ImportParamFromFile
from scipy.integrate import odeint
from ColDetModel import ColDetModel
from RunModel import RunModel
import multiprocessing
import math
import numpy as np
import sys
import pickle
import time
def ParamAdjust(itKey, parameters, y0, t0, t, baselineLeakage, scale, sampleTiming, ModelName=ColDetModel):
#Run the model with the appropriate changes
parameters[itKey] = parameters[itKey] * scale
model = ModelName()
sol = odeint(model.DifEqs, y0, t, args = (parameters,), full_output = 0, atol = 1.0e-1, hmax = t0[2])
while (model.tSwitch > (t[-1] - (sampleTiming*3.5))):
t = np.arange(t0[0],(t[-1]*1.5)+t0[2],t0[2])
sol = odeint(model.DifEqs, y0, t, args = (parameters,), full_output = 0, atol = 1.0e-1, hmax = t0[2])
switchIndex = math.ceil(model.tSwitch / t0[2])
indexArray = -1
gain = np.array([((sol[indexArray,19]-baselineLeakage)/baselineLeakage)/(scale-1)])
#gain is percent change in leakage over percent change in parameter
return [itKey, gain]
def RunTask(inQueue, outQueue):
#Function run by the child processes to handle tasks and report results
while(1):
func,args = inQueue.get()
outQueue.put(func(*args))
inQueue.task_done()
if __name__ == '__main__':
#initialization
numCPUs = multiprocessing.cpu_count()
inputSource='ColDet-model-parameters.csv'
scales = (1.05, 1.04, 1.03, 1.02, 1.01, 0.99, 0.98, 0.97, 0.96, 0.95)
#scales = (1.05,0.95)
    # Deprecated
sampleTiming = 365
y0, t0, parameters = ImportParamFromFile(inputSource, ColDetModel)
    # Run base case
t = np.arange(t0[0],t0[1]+t0[2],t0[2])
model = ColDetModel()
sol = odeint(model.DifEqs, y0, t, args = (parameters,), full_output = 0, atol = 1.0e-1, hmax = t0[2])
indexArray = -1
baselineLeakage = np.array(sol[indexArray,19])
#reinit parameters to revert changes that may have occured calculation of the model
y0, t0, parameters = ImportParamFromFile(inputSource, ColDetModel)
#initializing multiprocessing
processes = []
resultsArrays= []
finishQueues = []
multiprocessing.freeze_support()
runQueue = multiprocessing.JoinableQueue()
#Set up results Queues and Processes
for i in range(numCPUs):
finishQueues.append(multiprocessing.Queue())
print('Made result Queue #' + repr(i+1))
processes.append(multiprocessing.Process(target = RunTask, args=(runQueue,finishQueues[i])))
processes[i].start()
print('Made process #' + repr(i+1))
    # Run sensitivity analysis for each scale factor
paramKeys = list(parameters.keys())
for j in range(len(scales)):
keyArray = np.empty(0)
gainArray = np.empty(0)
#Set up and push tasks to queue
tasks = [(ParamAdjust, (key, parameters, y0, t0, t, baselineLeakage, scales[j], sampleTiming)) for key in paramKeys]
for task in tasks:
runQueue.put_nowait(task)
runQueue.join()
#Pull results and append them to a list defined in an outer scope
results = []
for i in range(len(finishQueues)):
while(not finishQueues[i].empty()):
results.append(finishQueues[i].get())
for i in range(len(results)):
keyArray = np.append(keyArray, results[i][0])
gainArray = np.append(gainArray, results[i][1])
#Combine keyArray and gainArray into one array
compositeArray = np.append(np.transpose(np.atleast_2d(keyArray)), np.transpose(np.atleast_2d(gainArray)), axis=1)
resultsArrays.append(compositeArray)
for i in range(len(processes)):
processes[i].terminate()
#Dump results
file = open('LocalSensitivity.p', 'wb')
pickle.dump(resultsArrays, file)
file.close()
#Organize results into a spreadsheet
resultsOutput = np.empty((len(keyArray)+1, 0))
dType1 = np.dtype([('param', '<U32'), ('100 Days', '<U32')])
dType2 = np.dtype('<U32')
for i in range(len(scales)):
resultsArrays[i].dtype = dType1
resultsArrays[i] = np.sort(resultsArrays[i], axis = 0, order='param')
resultsArrays[i].dtype = dType2
dummyArray1 = np.array([['scale', scales[i], '']])
dummyArray2 = np.full((len(keyArray), 1), '', dtype = '<U5')
dummyArray2 = np.append(dummyArray2, resultsArrays[i], axis = 1)
dummyArray3 = np.append(dummyArray1, dummyArray2, axis=0)
resultsOutput = np.append(resultsOutput, dummyArray3, axis = 1)
np.savetxt('SensativityAnalysis.csv', resultsOutput, fmt = '%s', delimiter = ',')
| [
"numpy.atleast_2d",
"math.ceil",
"ImportParamFromFile.ImportParamFromFile",
"multiprocessing.JoinableQueue",
"pickle.dump",
"scipy.integrate.odeint",
"numpy.sort",
"multiprocessing.Process",
"multiprocessing.cpu_count",
"numpy.append",
"numpy.array",
"multiprocessing.freeze_support",
"ColDetModel.ColDetModel",
"numpy.empty",
"numpy.savetxt",
"numpy.dtype",
"multiprocessing.Queue",
"numpy.arange"
] | [((526, 614), 'scipy.integrate.odeint', 'odeint', (['model.DifEqs', 'y0', 't'], {'args': '(parameters,)', 'full_output': '(0)', 'atol': '(0.1)', 'hmax': 't0[2]'}), '(model.DifEqs, y0, t, args=(parameters,), full_output=0, atol=0.1,\n hmax=t0[2])\n', (532, 614), False, 'from scipy.integrate import odeint\n'), ((876, 908), 'math.ceil', 'math.ceil', (['(model.tSwitch / t0[2])'], {}), '(model.tSwitch / t0[2])\n', (885, 908), False, 'import math\n'), ((946, 1034), 'numpy.array', 'np.array', (['[(sol[indexArray, 19] - baselineLeakage) / baselineLeakage / (\n scale - 1)]'], {}), '([(sol[indexArray, 19] - baselineLeakage) / baselineLeakage / (\n scale - 1)])\n', (954, 1034), True, 'import numpy as np\n'), ((1447, 1474), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1472, 1474), False, 'import multiprocessing\n'), ((1703, 1748), 'ImportParamFromFile.ImportParamFromFile', 'ImportParamFromFile', (['inputSource', 'ColDetModel'], {}), '(inputSource, ColDetModel)\n', (1722, 1748), False, 'from ImportParamFromFile import ImportParamFromFile\n'), ((1776, 1814), 'numpy.arange', 'np.arange', (['t0[0]', '(t0[1] + t0[2])', 't0[2]'], {}), '(t0[0], t0[1] + t0[2], t0[2])\n', (1785, 1814), True, 'import numpy as np\n'), ((1831, 1844), 'ColDetModel.ColDetModel', 'ColDetModel', ([], {}), '()\n', (1842, 1844), False, 'from ColDetModel import ColDetModel\n'), ((1855, 1943), 'scipy.integrate.odeint', 'odeint', (['model.DifEqs', 'y0', 't'], {'args': '(parameters,)', 'full_output': '(0)', 'atol': '(0.1)', 'hmax': 't0[2]'}), '(model.DifEqs, y0, t, args=(parameters,), full_output=0, atol=0.1,\n hmax=t0[2])\n', (1861, 1943), False, 'from scipy.integrate import odeint\n'), ((2002, 2031), 'numpy.array', 'np.array', (['sol[indexArray, 19]'], {}), '(sol[indexArray, 19])\n', (2010, 2031), True, 'import numpy as np\n'), ((2149, 2194), 'ImportParamFromFile.ImportParamFromFile', 'ImportParamFromFile', (['inputSource', 'ColDetModel'], {}), '(inputSource, ColDetModel)\n', (2168, 2194), False, 'from ImportParamFromFile import ImportParamFromFile\n'), ((2305, 2337), 'multiprocessing.freeze_support', 'multiprocessing.freeze_support', ([], {}), '()\n', (2335, 2337), False, 'import multiprocessing\n'), ((2357, 2388), 'multiprocessing.JoinableQueue', 'multiprocessing.JoinableQueue', ([], {}), '()\n', (2386, 2388), False, 'import multiprocessing\n'), ((4065, 4097), 'pickle.dump', 'pickle.dump', (['resultsArrays', 'file'], {}), '(resultsArrays, file)\n', (4076, 4097), False, 'import pickle\n'), ((4233, 4284), 'numpy.dtype', 'np.dtype', (["[('param', '<U32'), ('100 Days', '<U32')]"], {}), "([('param', '<U32'), ('100 Days', '<U32')])\n", (4241, 4284), True, 'import numpy as np\n'), ((4298, 4314), 'numpy.dtype', 'np.dtype', (['"""<U32"""'], {}), "('<U32')\n", (4306, 4314), True, 'import numpy as np\n'), ((4881, 4958), 'numpy.savetxt', 'np.savetxt', (['"""SensativityAnalysis.csv"""', 'resultsOutput'], {'fmt': '"""%s"""', 'delimiter': '""","""'}), "('SensativityAnalysis.csv', resultsOutput, fmt='%s', delimiter=',')\n", (4891, 4958), True, 'import numpy as np\n'), ((697, 741), 'numpy.arange', 'np.arange', (['t0[0]', '(t[-1] * 1.5 + t0[2])', 't0[2]'], {}), '(t0[0], t[-1] * 1.5 + t0[2], t0[2])\n', (706, 741), True, 'import numpy as np\n'), ((752, 840), 'scipy.integrate.odeint', 'odeint', (['model.DifEqs', 'y0', 't'], {'args': '(parameters,)', 'full_output': '(0)', 'atol': '(0.1)', 'hmax': 't0[2]'}), '(model.DifEqs, y0, t, args=(parameters,), full_output=0, atol=0.1,\n hmax=t0[2])\n', (758, 840), False, 'from scipy.integrate import odeint\n'), ((2917, 2928), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (2925, 2928), True, 'import numpy as np\n'), ((2949, 2960), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (2957, 2960), True, 'import numpy as np\n'), ((4429, 4477), 'numpy.sort', 'np.sort', (['resultsArrays[i]'], {'axis': '(0)', 'order': '"""param"""'}), "(resultsArrays[i], axis=0, order='param')\n", (4436, 4477), True, 'import numpy as np\n'), ((4554, 4590), 'numpy.array', 'np.array', (["[['scale', scales[i], '']]"], {}), "([['scale', scales[i], '']])\n", (4562, 4590), True, 'import numpy as np\n'), ((4683, 4731), 'numpy.append', 'np.append', (['dummyArray2', 'resultsArrays[i]'], {'axis': '(1)'}), '(dummyArray2, resultsArrays[i], axis=1)\n', (4692, 4731), True, 'import numpy as np\n'), ((4756, 4799), 'numpy.append', 'np.append', (['dummyArray1', 'dummyArray2'], {'axis': '(0)'}), '(dummyArray1, dummyArray2, axis=0)\n', (4765, 4799), True, 'import numpy as np\n'), ((4824, 4869), 'numpy.append', 'np.append', (['resultsOutput', 'dummyArray3'], {'axis': '(1)'}), '(resultsOutput, dummyArray3, axis=1)\n', (4833, 4869), True, 'import numpy as np\n'), ((2494, 2517), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (2515, 2517), False, 'import multiprocessing\n'), ((2593, 2666), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'RunTask', 'args': '(runQueue, finishQueues[i])'}), '(target=RunTask, args=(runQueue, finishQueues[i]))\n', (2616, 2666), False, 'import multiprocessing\n'), ((3564, 3598), 'numpy.append', 'np.append', (['keyArray', 'results[i][0]'], {}), '(keyArray, results[i][0])\n', (3573, 3598), True, 'import numpy as np\n'), ((3623, 3658), 'numpy.append', 'np.append', (['gainArray', 'results[i][1]'], {}), '(gainArray, results[i][1])\n', (3632, 3658), True, 'import numpy as np\n'), ((3771, 3794), 'numpy.atleast_2d', 'np.atleast_2d', (['keyArray'], {}), '(keyArray)\n', (3784, 3794), True, 'import numpy as np\n'), ((3810, 3834), 'numpy.atleast_2d', 'np.atleast_2d', (['gainArray'], {}), '(gainArray)\n', (3823, 3834), True, 'import numpy as np\n')]
from flask import jsonify
from flask_restplus import Resource
from flask_jwt_extended import jwt_required
from application import cache
from application.tasks.main import simple_task
from . import api
@api.route("/")
class Main(Resource):
@jwt_required
@cache.cached()
def get(self):
result = simple_task.delay()
return jsonify({"message": result.get(timeout=1)})
| [
"application.tasks.main.simple_task.delay",
"application.cache.cached"
] | [((264, 278), 'application.cache.cached', 'cache.cached', ([], {}), '()\n', (276, 278), False, 'from application import cache\n'), ((315, 334), 'application.tasks.main.simple_task.delay', 'simple_task.delay', ([], {}), '()\n', (332, 334), False, 'from application.tasks.main import simple_task\n')] |
import pyautogui as p
import time
import os
# Find Ordered Item
# Location: 'items/ordered_item/<item>'
CONF = 0.95
MOUSE_SPEED = 0.13
COFFEE_LOCATIONS = [(337, 717), (406, 726), (480, 733)]
MS_LOCATIONS = [(637, 688), (678, 602), (702, 523)]
OVEN_LOCATIONS = [(1462, 591), (1437, 707), (1417, 816), (1393, 941)]
ORDER_REGIONS = [
(280, 130, 160, 300),
(630, 130, 160, 300),
(970, 130, 160, 300),
(1320, 130, 160, 300),
]
PAN_LOCATIONS = [
# (846, 688),
# (866, 582),
# (1046, 586),
(1058, 690)
]
MENU = [
'vanilla', 'chocolate', 'custard',
'strawberry_vanilla', # 'strawberry_chocolate',
'strawberry_custard',
'peach_vanilla', 'peach_chocolate', 'peach_custard',
'blueberry_vanilla', # 'blueberry_chocolate',
'blueberry_custard',
'coffee', 'milkshake',
'strawberry_blueberry_vanilla',
]
ORDER = []
CAKE_QUEUE = []
MONEY_REGIONS = [
(450, 380, 90, 90),
(790, 380, 90, 90),
(1130, 380, 90, 90),
(1480, 380, 90, 90)
]
MONEY = [
(500, 430),
(830, 430),
(1180, 430),
(1530, 430)
]
class Cake:
def __init__(self, flavor, location, oven):
self.frosting = None
self.topping = None
self.flavor = flavor
self.delivery_location = location
self.oven_location = oven
self.countdown = 29
self.set_topping()
self.make_cake()
self.baking()
def update(self, val):
self.countdown -= val
return self.countdown
def set_topping(self):
if self.flavor == 'vanilla':
self.frosting = (1240, 623)
if self.flavor == 'strawberry_vanilla':
self.frosting = (1240, 623)
self.topping = [(960, 801)]
if self.flavor == 'peach_vanilla':
self.frosting = (1240, 623)
self.topping = [(860, 800)]
if self.flavor == 'blueberry_vanilla':
self.frosting = (1240, 623)
self.topping = [(1090, 800)]
if self.flavor == 'chocolate':
self.frosting = (1275, 716)
if self.flavor == 'peach_chocolate':
self.frosting = (1275, 716)
self.topping = [(860, 800)]
if self.flavor == 'custard':
self.frosting = (1216, 533)
if self.flavor == 'peach_custard':
self.frosting = (1216, 533)
self.topping = [(860, 800)]
if self.flavor == 'blueberry_custard':
self.frosting = (1216, 533)
self.topping = [(1090, 800)]
if self.flavor == 'strawberry_custard':
self.frosting = (1216, 533)
self.topping = [(960, 801)]
if self.flavor == 'strawberry_blueberry_vanilla':
self.frosting = (1240, 623)
self.topping = [(960, 801), (1090, 800)]
def make_cake(self):
addMold = click((953, 917))
serve_item(self.frosting, PAN_LOCATIONS[0])
if self.topping:
for topping in self.topping:
serve_item(topping, PAN_LOCATIONS[0])
def baking(self):
if len(CAKE_QUEUE) == 4:
remaining_time = 21
for cake in CAKE_QUEUE:
if cake.countdown < remaining_time:
remaining_time = cake.countdown
time.sleep(remaining_time * 0.15)
update_queue(remaining_time)
serve_item(PAN_LOCATIONS[0], self.oven_location)
CAKE_QUEUE.append(self)
def serve_cake(self):
serve_item(self.oven_location, self.delivery_location)
def restart(location):
time.sleep(6)
prepare.coffee_count = 0
prepare.mscounter = 0
p.click(1360, 941)
time.sleep(6)
p.click(626, 834)
def store_data():
"""This will be used to capture images of a particular region to test a machine learning
algorithm if the tests are successful"""
pass
def click(loc):
# Takes 0.2 secs to execute
p.moveTo(loc[0], loc[1], MOUSE_SPEED)
p.click()
update_queue(1)
def serve_item(start, stop):
"""
This function moves to a location and drags an object from that location to another.
input: start -> tuple containing the coordinates for the start point.
stop -> tuple containing the coordinates for the end point.
output: None
"""
# Takes 0.4 secs to execute.
p.moveTo(start[0], start[1], MOUSE_SPEED)
p.dragTo(stop[0], stop[1], MOUSE_SPEED, button='left')
update_queue(2)
def update_queue(val):
if len(CAKE_QUEUE) > 0:
rem = 5
for i in range(len(CAKE_QUEUE)):
re = CAKE_QUEUE[i].update(val)
if re <= 0:
rem = i
if rem < 5:
CAKE_QUEUE.pop(rem).serve_cake()
def find_order():
"""
This function searches a particular region to determine if a menu item exists in that location
"""
for item in MENU:
for rgn in ORDER_REGIONS:
# p.screenshot("images/data/{}.png".format(rgn[0]), region=rgn)
searchFor = 'images/menu/{}.png'.format(item)
locateItem = p.locateCenterOnScreen(
searchFor, region=rgn, confidence=CONF)
if locateItem:
data = {'item': item, 'location': locateItem}
ORDER.append(data)
def prepare():
while len(ORDER) > 0:
a = ORDER.pop(0)
item = a['item']
if item == 'coffee':
coffeeLocation = COFFEE_LOCATIONS[prepare.coffee_count % 3]
prepare.coffee_count += 1
serve_item(coffeeLocation, a['location'])
elif item == 'milkshake':
msLocation = MS_LOCATIONS[prepare.mscounter % 3]
if prepare.mscounter % 3 == 0:
click((626, 834))
serve_item(msLocation, a['location'])
prepare.mscounter += 1
else:
prepare.mscounter += 1
serve_item(msLocation, a['location'])
else:
Cake(item, a['location'], OVEN_LOCATIONS[prepare.cakecount % 4])
prepare.cakecount += 1
collect_money()
while len(CAKE_QUEUE) > 0:
time.sleep(MOUSE_SPEED)
update_queue(1)
collect_money()
def collect_money():
for rgn in MONEY:
p.click(rgn)
# Main Control
def main():
delay = 6
prepare.coffee_count = 0
prepare.mscounter = 0
prepare.cakecount = 0
print("Starting CFBot...")
time.sleep(delay)
click((626, 834))
prepare.mscounter += 1
while True:
find_order()
prepare()
restart_button = p.locateCenterOnScreen(
'images/ref/restart.png')
if restart_button:
# break
restart(restart_button)
main()
| [
"pyautogui.locateCenterOnScreen",
"pyautogui.moveTo",
"time.sleep",
"pyautogui.click",
"pyautogui.dragTo"
] | [((3665, 3678), 'time.sleep', 'time.sleep', (['(6)'], {}), '(6)\n', (3675, 3678), False, 'import time\n'), ((3741, 3759), 'pyautogui.click', 'p.click', (['(1360)', '(941)'], {}), '(1360, 941)\n', (3748, 3759), True, 'import pyautogui as p\n'), ((3765, 3778), 'time.sleep', 'time.sleep', (['(6)'], {}), '(6)\n', (3775, 3778), False, 'import time\n'), ((3784, 3801), 'pyautogui.click', 'p.click', (['(626)', '(834)'], {}), '(626, 834)\n', (3791, 3801), True, 'import pyautogui as p\n'), ((4034, 4071), 'pyautogui.moveTo', 'p.moveTo', (['loc[0]', 'loc[1]', 'MOUSE_SPEED'], {}), '(loc[0], loc[1], MOUSE_SPEED)\n', (4042, 4071), True, 'import pyautogui as p\n'), ((4077, 4086), 'pyautogui.click', 'p.click', ([], {}), '()\n', (4084, 4086), True, 'import pyautogui as p\n'), ((4456, 4497), 'pyautogui.moveTo', 'p.moveTo', (['start[0]', 'start[1]', 'MOUSE_SPEED'], {}), '(start[0], start[1], MOUSE_SPEED)\n', (4464, 4497), True, 'import pyautogui as p\n'), ((4503, 4557), 'pyautogui.dragTo', 'p.dragTo', (['stop[0]', 'stop[1]', 'MOUSE_SPEED'], {'button': '"""left"""'}), "(stop[0], stop[1], MOUSE_SPEED, button='left')\n", (4511, 4557), True, 'import pyautogui as p\n'), ((6607, 6624), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (6617, 6624), False, 'import time\n'), ((6297, 6320), 'time.sleep', 'time.sleep', (['MOUSE_SPEED'], {}), '(MOUSE_SPEED)\n', (6307, 6320), False, 'import time\n'), ((6425, 6437), 'pyautogui.click', 'p.click', (['rgn'], {}), '(rgn)\n', (6432, 6437), True, 'import pyautogui as p\n'), ((6760, 6808), 'pyautogui.locateCenterOnScreen', 'p.locateCenterOnScreen', (['"""images/ref/restart.png"""'], {}), "('images/ref/restart.png')\n", (6782, 6808), True, 'import pyautogui as p\n'), ((3372, 3405), 'time.sleep', 'time.sleep', (['(remaining_time * 0.15)'], {}), '(remaining_time * 0.15)\n', (3382, 3405), False, 'import time\n'), ((5217, 5279), 'pyautogui.locateCenterOnScreen', 'p.locateCenterOnScreen', (['searchFor'], {'region': 'rgn', 'confidence': 'CONF'}), '(searchFor, region=rgn, confidence=CONF)\n', (5239, 5279), True, 'import pyautogui as p\n')] |
from django.utils.timezone import now
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
from .utils import get_additional_styles
class Quote(CMSPlugin):
"""
A quote or testimonial
"""
STANDARD = 'standard'
STYLE_CHOICES = [
(STANDARD, _('Standard')),
]
style = models.CharField(
_('Style'), choices=STYLE_CHOICES + get_additional_styles(), default=STANDARD, max_length=50
)
created_at = models.DateTimeField(_('Created at'), default=now)
content = models.TextField(_('Quote'), default='')
footer = models.TextField(_('Footer'), blank=True)
url = models.URLField(_('Link'), blank=True)
target = models.CharField(
_('Target'), max_length=50, blank=True, default='_blank', choices=(('_blank', _('New window')),)
)
def __unicode__(self):
return self.content[:50]
| [
"django.utils.translation.ugettext_lazy"
] | [((397, 407), 'django.utils.translation.ugettext_lazy', '_', (['"""Style"""'], {}), "('Style')\n", (398, 407), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((534, 549), 'django.utils.translation.ugettext_lazy', '_', (['"""Created at"""'], {}), "('Created at')\n", (535, 549), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((595, 605), 'django.utils.translation.ugettext_lazy', '_', (['"""Quote"""'], {}), "('Quote')\n", (596, 605), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((649, 660), 'django.utils.translation.ugettext_lazy', '_', (['"""Footer"""'], {}), "('Footer')\n", (650, 660), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((700, 709), 'django.utils.translation.ugettext_lazy', '_', (['"""Link"""'], {}), "('Link')\n", (701, 709), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((762, 773), 'django.utils.translation.ugettext_lazy', '_', (['"""Target"""'], {}), "('Target')\n", (763, 773), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((336, 349), 'django.utils.translation.ugettext_lazy', '_', (['"""Standard"""'], {}), "('Standard')\n", (337, 349), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((840, 855), 'django.utils.translation.ugettext_lazy', '_', (['"""New window"""'], {}), "('New window')\n", (841, 855), True, 'from django.utils.translation import ugettext_lazy as _\n')] |