code | apis | extract_api
---|---|---|
from django.urls import path
from pridesport_work.pridesport_auth.views import login_user, logout_user, RegisterView, UserProfileView
urlpatterns = (
# path('register/', register_user, name='register user'),
path('register/', RegisterView.as_view(), name='register user'),
path('login/', login_user, name='login user'),
path('logout/', logout_user, name='logout user'),
path('profile/<int:pk>', login_user, name='user profile'),
    path('profile/', UserProfileView.as_view(), name='user profile'),
) | [
"pridesport_work.pridesport_auth.views.UserProfileView.as_view",
"django.urls.path",
"pridesport_work.pridesport_auth.views.RegisterView.as_view"
] | [((287, 332), 'django.urls.path', 'path', (['"""login/"""', 'login_user'], {'name': '"""login user"""'}), "('login/', login_user, name='login user')\n", (291, 332), False, 'from django.urls import path\n'), ((338, 386), 'django.urls.path', 'path', (['"""logout/"""', 'logout_user'], {'name': '"""logout user"""'}), "('logout/', logout_user, name='logout user')\n", (342, 386), False, 'from django.urls import path\n'), ((392, 449), 'django.urls.path', 'path', (['"""profile/<int:pk>"""', 'login_user'], {'name': '"""user profile"""'}), "('profile/<int:pk>', login_user, name='user profile')\n", (396, 449), False, 'from django.urls import path\n'), ((236, 258), 'pridesport_work.pridesport_auth.views.RegisterView.as_view', 'RegisterView.as_view', ([], {}), '()\n', (256, 258), False, 'from pridesport_work.pridesport_auth.views import login_user, logout_user, RegisterView, UserProfileView\n'), ((472, 497), 'pridesport_work.pridesport_auth.views.UserProfileView.as_view', 'UserProfileView.as_view', ([], {}), '()\n', (495, 497), False, 'from pridesport_work.pridesport_auth.views import login_user, logout_user, RegisterView, UserProfileView\n')] |
# Generated by Django 3.1.6 on 2021-02-14 19:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('stock', '0013_auto_20210214_1854'),
]
operations = [
migrations.RenameField(
model_name='cashflow',
old_name='common_stock_issurance',
new_name='common_stock_issuance',
),
]
| [
"django.db.migrations.RenameField"
] | [((225, 344), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""cashflow"""', 'old_name': '"""common_stock_issurance"""', 'new_name': '"""common_stock_issuance"""'}), "(model_name='cashflow', old_name=\n 'common_stock_issurance', new_name='common_stock_issuance')\n", (247, 344), False, 'from django.db import migrations\n')] |
import pytest
__author__ = "geyerbisschoff"
__copyright__ = "geyerbisschoff"
__license__ = "MIT"
def test_tobit_model():
"""API Tests"""
import pandas as pd
import numpy as np
from sklearn.datasets import make_regression
from statsmodels_tobit import TobitModel, TobitResults
rs = np.random.RandomState(seed=10)
ns = 1000
nf = 10
noise = 0
x, y_orig, coef = make_regression(n_samples=ns, n_features=nf, coef=True, noise=noise, random_state=rs)
x = pd.DataFrame(x)
y = pd.Series(y_orig)
    n_quantiles = 3  # two-thirds of the data ends up censored at the clipping bounds
quantile = 100 / float(n_quantiles)
lower = np.percentile(y, quantile)
upper = np.percentile(y, (n_quantiles - 1) * quantile)
y = y.clip(upper=upper, lower=lower)
tr = TobitModel(y, x, lower_bound=lower, upper_bound=upper).fit()
assert isinstance(tr, TobitResults)
assert np.all(np.round(tr.params[:-1], 4) == np.round(coef, 4))
assert np.round(np.exp(tr.params[-1]), 4) == noise
assert isinstance(tr.predict(which='all'), pd.DataFrame)
| [
"pandas.Series",
"sklearn.datasets.make_regression",
"numpy.round",
"statsmodels_tobit.TobitModel",
"numpy.exp",
"pandas.DataFrame",
"numpy.percentile",
"numpy.random.RandomState"
] | [((309, 339), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(10)'}), '(seed=10)\n', (330, 339), True, 'import numpy as np\n'), ((402, 491), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': 'ns', 'n_features': 'nf', 'coef': '(True)', 'noise': 'noise', 'random_state': 'rs'}), '(n_samples=ns, n_features=nf, coef=True, noise=noise,\n random_state=rs)\n', (417, 491), False, 'from sklearn.datasets import make_regression\n'), ((496, 511), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {}), '(x)\n', (508, 511), True, 'import pandas as pd\n'), ((520, 537), 'pandas.Series', 'pd.Series', (['y_orig'], {}), '(y_orig)\n', (529, 537), True, 'import pandas as pd\n'), ((649, 675), 'numpy.percentile', 'np.percentile', (['y', 'quantile'], {}), '(y, quantile)\n', (662, 675), True, 'import numpy as np\n'), ((688, 734), 'numpy.percentile', 'np.percentile', (['y', '((n_quantiles - 1) * quantile)'], {}), '(y, (n_quantiles - 1) * quantile)\n', (701, 734), True, 'import numpy as np\n'), ((786, 840), 'statsmodels_tobit.TobitModel', 'TobitModel', (['y', 'x'], {'lower_bound': 'lower', 'upper_bound': 'upper'}), '(y, x, lower_bound=lower, upper_bound=upper)\n', (796, 840), False, 'from statsmodels_tobit import TobitModel, TobitResults\n'), ((905, 932), 'numpy.round', 'np.round', (['tr.params[:-1]', '(4)'], {}), '(tr.params[:-1], 4)\n', (913, 932), True, 'import numpy as np\n'), ((936, 953), 'numpy.round', 'np.round', (['coef', '(4)'], {}), '(coef, 4)\n', (944, 953), True, 'import numpy as np\n'), ((975, 996), 'numpy.exp', 'np.exp', (['tr.params[-1]'], {}), '(tr.params[-1])\n', (981, 996), True, 'import numpy as np\n')] |
import os
import sys
import shutil
import tempfile
import subprocess as sp
import requests
from datetime import datetime
from pathlib import Path
import click
from git import Repo, IndexFile
from git.objects.commit import Commit
from halo import Halo
from colorama import init as colorama_init
from colorama import Fore, Back, Style
from ..utils.github_scanner import query_matching_repos, github_headers, get_github_endpoint_paged_list
from ..config.github import config_github
# The pull request we open uses the body of the first issue whose title
# matches patch_branch as its description
@click.command()
@click.argument('hw-prefix')
@click.argument('patch-branch')
@click.option('--source-repo', default='', help="default to tmpl-{hw-prefix}-revise")
@click.option('--token', default=config_github['personal_access_token'], help="github access token")
@click.option('--org', default=config_github['organization'], show_default=True)
@click.option('--only-repo', nargs=1, help="only repo to patch")
def patch_project(hw_prefix, patch_branch, source_repo, token, org, only_repo):
'''Patch to student homeworks'''
# init
colorama_init(autoreset=True)
spinner = Halo(stream=sys.stderr)
if source_repo == '':
source_repo = f'tmpl-{hw_prefix}-revise'
# Check if repo already contains the patched branch. Skip if so.
# api : https://developer.github.com/v3/git/refs/#get-a-reference
res = requests.get(
f"https://api.github.com/repos/{org}/{source_repo}/git/refs/heads/{patch_branch}", headers=github_headers(token))
    if res.status_code != 200:  # this branch does not exist on the remote
spinner.fail(f"branch : `{patch_branch}` doesn't exist on repo:{org}/{source_repo} ")
return
cur = Path('.')
for d in cur.glob("patch-*"):
shutil.rmtree(d)
    spinner.info("deleted outdated patch folders")
spinner.start(
f"Fetch issue template {Fore.CYAN}{patch_branch} {Fore.RESET}from {Fore.CYAN}{source_repo}")
# Fetch patch template on the source repo
issues = get_github_endpoint_paged_list(
endpoint=f"repos/{org}/{source_repo}/issues",
github_token=token,
verbose=False
)
issue_tmpl_found = False
for i in issues:
if i['title'] == patch_branch:
issue_tmpl_found = True
issue_tmpl_body = i['body']
break
if not issue_tmpl_found:
raise Exception(
f"cannot found issue tmpl `{patch_branch}` on `{source_repo}`")
spinner.succeed()
root_folder = Path(tempfile.mkdtemp(
prefix="patch-{}-{}-".format(patch_branch, datetime.now().strftime("%b%d%H%M%S")), dir="."))
spinner.succeed(f"Create tmp folder {Fore.YELLOW}{root_folder}")
spinner.info(
f"Fetch source repo {Fore.CYAN}{source_repo}{Style.RESET_ALL} from GitHub")
src_repo_path = root_folder / "source_repo"
sp.run(['git', 'clone',
f'https://github.com/{org}/{source_repo}.git', src_repo_path.name, ], cwd=root_folder)
src_repo = Repo(src_repo_path)
sp.run(['git', 'checkout', '--track', f'origin/{patch_branch}'],
cwd=src_repo_path, stdout=sp.DEVNULL, stderr=sp.DEVNULL)
spinner.succeed()
# Pasting changed files into students repo
src_repo_git = src_repo.git
src_repo_git.checkout(patch_branch)
changed_files, renamed_files = get_changed_files(
master_commit=src_repo.heads['master'].commit,
patch_commit=src_repo.heads[patch_branch].commit
)
spinner.start("Fetch information for homework repo")
spinner.succeed()
if only_repo is not None:
repos = [re for re in query_matching_repos(org,
github_repo_prefix=only_repo,
github_token=token,
verbose=False) if re['name'] == only_repo]
repo = next(iter(repos), None)
if repo:
spinner.info(
f"Only patch to repo : {Fore.YELLOW}{repo['name']}{Style.RESET_ALL}")
repos = [repo]
else:
repos = query_matching_repos(org,
github_repo_prefix=hw_prefix,
github_token=token,
verbose=False)
spinner.succeed()
# Patch to student repos
student_path = root_folder / "student_repos"
student_path.mkdir()
for repo_idx, r in enumerate(repos, start=1):
pre_prompt_str = f"({repo_idx}/{len(repos)}) {Fore.YELLOW}{r['name']}{Fore.RESET}"
spinner.start()
# Check if repo already contains the patched branch. Skip if so.
# api : https://developer.github.com/v3/git/refs/#get-a-reference
res = requests.get(
f"https://api.github.com/repos/{org}/{r['name']}/git/refs/heads/{patch_branch}", headers=github_headers(token))
if res.status_code == 200: # this branch exists in the remote
spinner.text = pre_prompt_str + \
f" {Back.GREEN}{Fore.BLACK} Skip {Style.RESET_ALL} already patched"
spinner.succeed()
continue
spinner.text = pre_prompt_str + \
f" {Fore.BLUE}cloning repo..{Fore.RESET}"
sp.run(['git', 'clone', '--depth=1', r['html_url']],
cwd=student_path, stdout=sp.DEVNULL, stderr=sp.DEVNULL)
hw_repo_name = r['html_url'].rsplit("/")[-1]
# open a new branch & checkout to that branch
sp.run(['git', 'checkout', '-b', patch_branch],
cwd=student_path/hw_repo_name, stdout=sp.DEVNULL, stderr=sp.DEVNULL)
# copy file to student repo
for f in changed_files.keys():
(student_path/hw_repo_name/f).parent.mkdir(parents=True, exist_ok=True)
shutil.copyfile(src=src_repo_path/f,
dst=student_path/hw_repo_name/f)
for f in renamed_files.keys():
os.remove(student_path/hw_repo_name/f)
# changed_files = get_changed_files(
# master_commit = src_repo.heads['master'].commit,
# patch_commit = src_repo.heads[patch_branch].commit
# )
# push (publish) that branch to student repo
sp.run(['git', 'add', '.'], cwd=student_path/hw_repo_name,
stdout=sp.DEVNULL, stderr=sp.DEVNULL)
# Pass if no changed
student_repo = Repo(student_path/hw_repo_name)
if len(student_repo.index.diff("HEAD")) == 0:
spinner.text = pre_prompt_str + \
f" {Back.GREEN}{Fore.BLACK} Passed {Style.RESET_ALL} Repo no change"
spinner.succeed()
continue
sp.run(['git', 'commit', '-m', f':construction_worker: Patch: {patch_branch}'],
cwd=student_path/hw_repo_name,
stdout=sp.DEVNULL, stderr=sp.DEVNULL)
spinner.text = pre_prompt_str + \
f" {Fore.BLUE}publish patch to remote..{Fore.RESET}"
res = sp.run(['git', 'push', '-u', 'origin', patch_branch], cwd=student_path/hw_repo_name,
stdout=sp.DEVNULL, stderr=sp.DEVNULL)
if res.returncode != 0:
spinner.text = (pre_prompt_str + f" {Back.RED}{Fore.BLACK} Failed {Style.RESET_ALL}"
+ f" Cannot push branch {Fore.CYAN}{patch_branch}{Fore.RESET} to origin")
spinner.fail()
continue
        # open a pull request on the student's repo
# student_repo/patch-branch -> student_repo/master
body = {
"title": f"[PATCH] {patch_branch}",
"body": issue_tmpl_body,
"head": patch_branch,
"base": "master"
}
res = requests.post(f"https://api.github.com/repos/{org}/{r['name']}/pulls",
headers=github_headers(token), json=body)
if res.status_code == 201:
spinner.text = pre_prompt_str + \
f" {Fore.BLACK}{Back.GREEN} Patched {Style.RESET_ALL}"
spinner.succeed()
else:
spinner.text = (pre_prompt_str + f" {Back.RED}{Fore.BLACK} Failed {Style.RESET_ALL}"
+ f" Cannot create PR {Fore.CYAN}{patch_branch}{Fore.RESET} to origin/master")
spinner.fail()
try:
print(f" {Fore.RED}{res.json()['errors'][0]['message']}")
            except Exception:
pass
continue
# TODO : print summary after patch
# how many success, skiped, failed
pass
def get_changed_files(master_commit: Commit, patch_commit: Commit):
# TODO
# git change type support:
# A: addition of a file
# C: copy of a file into a new one
# D: deletion of a file
# done M: modification of the contents or mode of a file
# R: renaming of a file
# T: change in the type of the file
# U: file is unmerged (you must complete the merge before it can be committed)
# X: "unknown" change type (most probably a bug, please report it)
changed_files = {}
renamed_files = {}
for x in master_commit.diff(patch_commit):
# Change type of x is not 'new'
if x.a_blob != None and x.a_blob.path not in changed_files.keys():
if x.change_type == 'R':
# file have been renamed, the dest file is include in the changed files
# so we need to delete this file from dest repo
# print(f'a remove(rename) :{x.a_blob.path}, type: {x.change_type}')
renamed_files[x.a_blob.path] = {'type': x.change_type}
else:
# print(f'a change :{x.a_blob.path}, type: {x.change_type}')
changed_files[x.a_blob.path] = {'type': x.change_type}
# Change type of x is not 'delete'
if x.b_blob is not None and x.b_blob.path not in changed_files.keys():
# print(f'b change :{x.b_blob.path}, type: {x.change_type}')
changed_files[x.b_blob.path] = {'type': x.change_type}
return changed_files, renamed_files
if __name__ == "__main__":
patch_project()
| [
"click.argument",
"halo.Halo",
"pathlib.Path",
"click.option",
"subprocess.run",
"shutil.rmtree",
"datetime.datetime.now",
"shutil.copyfile",
"git.Repo",
"click.command",
"colorama.init",
"os.remove"
] | [((628, 643), 'click.command', 'click.command', ([], {}), '()\n', (641, 643), False, 'import click\n'), ((645, 672), 'click.argument', 'click.argument', (['"""hw-prefix"""'], {}), "('hw-prefix')\n", (659, 672), False, 'import click\n'), ((674, 704), 'click.argument', 'click.argument', (['"""patch-branch"""'], {}), "('patch-branch')\n", (688, 704), False, 'import click\n'), ((706, 795), 'click.option', 'click.option', (['"""--source-repo"""'], {'default': '""""""', 'help': '"""default to tmpl-{hw-prefix}-revise"""'}), "('--source-repo', default='', help=\n 'default to tmpl-{hw-prefix}-revise')\n", (718, 795), False, 'import click\n'), ((792, 895), 'click.option', 'click.option', (['"""--token"""'], {'default': "config_github['personal_access_token']", 'help': '"""github access token"""'}), "('--token', default=config_github['personal_access_token'],\n help='github access token')\n", (804, 895), False, 'import click\n'), ((893, 972), 'click.option', 'click.option', (['"""--org"""'], {'default': "config_github['organization']", 'show_default': '(True)'}), "('--org', default=config_github['organization'], show_default=True)\n", (905, 972), False, 'import click\n'), ((974, 1037), 'click.option', 'click.option', (['"""--only-repo"""'], {'nargs': '(1)', 'help': '"""only repo to patch"""'}), "('--only-repo', nargs=1, help='only repo to patch')\n", (986, 1037), False, 'import click\n'), ((1170, 1199), 'colorama.init', 'colorama_init', ([], {'autoreset': '(True)'}), '(autoreset=True)\n', (1183, 1199), True, 'from colorama import init as colorama_init\n'), ((1214, 1237), 'halo.Halo', 'Halo', ([], {'stream': 'sys.stderr'}), '(stream=sys.stderr)\n', (1218, 1237), False, 'from halo import Halo\n'), ((1792, 1801), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (1796, 1801), False, 'from pathlib import Path\n'), ((2925, 3037), 'subprocess.run', 'sp.run', (["['git', 'clone', f'https://github.com/{org}/{source_repo}.git',\n src_repo_path.name]"], {'cwd': 'root_folder'}), "(['git', 'clone', f'https://github.com/{org}/{source_repo}.git',\n src_repo_path.name], cwd=root_folder)\n", (2931, 3037), True, 'import subprocess as sp\n'), ((3064, 3083), 'git.Repo', 'Repo', (['src_repo_path'], {}), '(src_repo_path)\n', (3068, 3083), False, 'from git import Repo, IndexFile\n'), ((3088, 3214), 'subprocess.run', 'sp.run', (["['git', 'checkout', '--track', f'origin/{patch_branch}']"], {'cwd': 'src_repo_path', 'stdout': 'sp.DEVNULL', 'stderr': 'sp.DEVNULL'}), "(['git', 'checkout', '--track', f'origin/{patch_branch}'], cwd=\n src_repo_path, stdout=sp.DEVNULL, stderr=sp.DEVNULL)\n", (3094, 3214), True, 'import subprocess as sp\n'), ((1844, 1860), 'shutil.rmtree', 'shutil.rmtree', (['d'], {}), '(d)\n', (1857, 1860), False, 'import shutil\n'), ((5315, 5427), 'subprocess.run', 'sp.run', (["['git', 'clone', '--depth=1', r['html_url']]"], {'cwd': 'student_path', 'stdout': 'sp.DEVNULL', 'stderr': 'sp.DEVNULL'}), "(['git', 'clone', '--depth=1', r['html_url']], cwd=student_path,\n stdout=sp.DEVNULL, stderr=sp.DEVNULL)\n", (5321, 5427), True, 'import subprocess as sp\n'), ((5556, 5678), 'subprocess.run', 'sp.run', (["['git', 'checkout', '-b', patch_branch]"], {'cwd': '(student_path / hw_repo_name)', 'stdout': 'sp.DEVNULL', 'stderr': 'sp.DEVNULL'}), "(['git', 'checkout', '-b', patch_branch], cwd=student_path /\n hw_repo_name, stdout=sp.DEVNULL, stderr=sp.DEVNULL)\n", (5562, 5678), True, 'import subprocess as sp\n'), ((6295, 6398), 'subprocess.run', 'sp.run', (["['git', 'add', '.']"], {'cwd': '(student_path / hw_repo_name)', 
'stdout': 'sp.DEVNULL', 'stderr': 'sp.DEVNULL'}), "(['git', 'add', '.'], cwd=student_path / hw_repo_name, stdout=sp.\n DEVNULL, stderr=sp.DEVNULL)\n", (6301, 6398), True, 'import subprocess as sp\n'), ((6460, 6493), 'git.Repo', 'Repo', (['(student_path / hw_repo_name)'], {}), '(student_path / hw_repo_name)\n', (6464, 6493), False, 'from git import Repo, IndexFile\n'), ((6737, 6895), 'subprocess.run', 'sp.run', (["['git', 'commit', '-m', f':construction_worker: Patch: {patch_branch}']"], {'cwd': '(student_path / hw_repo_name)', 'stdout': 'sp.DEVNULL', 'stderr': 'sp.DEVNULL'}), "(['git', 'commit', '-m',\n f':construction_worker: Patch: {patch_branch}'], cwd=student_path /\n hw_repo_name, stdout=sp.DEVNULL, stderr=sp.DEVNULL)\n", (6743, 6895), True, 'import subprocess as sp\n'), ((7038, 7166), 'subprocess.run', 'sp.run', (["['git', 'push', '-u', 'origin', patch_branch]"], {'cwd': '(student_path / hw_repo_name)', 'stdout': 'sp.DEVNULL', 'stderr': 'sp.DEVNULL'}), "(['git', 'push', '-u', 'origin', patch_branch], cwd=student_path /\n hw_repo_name, stdout=sp.DEVNULL, stderr=sp.DEVNULL)\n", (7044, 7166), True, 'import subprocess as sp\n'), ((5860, 5935), 'shutil.copyfile', 'shutil.copyfile', ([], {'src': '(src_repo_path / f)', 'dst': '(student_path / hw_repo_name / f)'}), '(src=src_repo_path / f, dst=student_path / hw_repo_name / f)\n', (5875, 5935), False, 'import shutil\n'), ((6009, 6051), 'os.remove', 'os.remove', (['(student_path / hw_repo_name / f)'], {}), '(student_path / hw_repo_name / f)\n', (6018, 6051), False, 'import os\n'), ((2651, 2665), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2663, 2665), False, 'from datetime import datetime\n')] |
#!/usr/bin/env python3
import json
import re
import socketserver
import http.server
def unique_id_sequence():
i = 0
while True:
yield i
i += 1
unique_id_sequence = unique_id_sequence()  # don't try this at home
class Handler(http.server.SimpleHTTPRequestHandler):
def send_all_good(self):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
def do_GET(self):
self.send_all_good()
self.wfile.write(
json.dumps(
[
{
'id': next(unique_id_sequence),
'name': "cositas para la casa",
'items':[
{
'id': next(unique_id_sequence),
'name': '<NAME>',
'addedBy': {
'name': 'Moni',
},
'buyAt': [],
'bought': False,
},
{
'id': next(unique_id_sequence),
'name': '<NAME>',
'addedBy': {
'name': 'Moni',
},
# 'buyAt': [ 'Hofer', 'Billa' ],
'buyAt': [],
'bought': True,
},
{
'id': next(unique_id_sequence),
'name': 'calabacín',
'addedBy': {
'name': 'Moni',
},
# 'buyAt': [ 'Hofer', 'Billa' ],
'buyAt': [],
'bought': False,
},
{
'id': next(unique_id_sequence),
'name': 'dulcítas',
'addedBy': {
'name': 'Moni',
},
# 'buyAt': [ 'Hofer', 'Billa' ],
'buyAt': [],
'bought': True,
},
]
},
{
'id': next(unique_id_sequence),
'name': "cositas para comer",
'items': [],
}
],
).encode('utf-8') + b'\n'
)
def do_POST(self):
self.send_all_good()
# assume query to add list
content = self.read_json_content()
print ("Post to path", self.path)
add_item_match = re.match(r".*/list/(?P<list_id>\d+)/$", self.path)
if add_item_match:
print("Adding new item")
list_id = add_item_match.group("list_id")
self.wfile.write(
json.dumps(
{
'id': next(unique_id_sequence),
'name': content['name'],
'addedBy': {
'name': 'Moni',
},
'buyAt': [],
'bought': False,
}
).encode('utf-8') + b'\n'
)
else:
print("Adding new list")
self.wfile.write(
json.dumps(
{
'id': next(unique_id_sequence),
'name': content['name'],
'items': [],
}
).encode('utf-8') + b'\n'
)
def do_PATCH(self):
self.send_all_good()
content = self.read_json_content()
self.wfile.write(
json.dumps(
{
'bought': content['bought']
},
).encode('utf-8') + b'\n'
)
def read_json_content(self):
length = int(self.headers['Content-Length'])
content_str = self.rfile.read(length)
return json.loads(content_str)
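# What this stub serves (illustrative summary of the handlers above):
#   GET  <any path>                          -> the hard-coded lists as JSON
#   POST /            with {"name": ...}     -> creates a new, empty list
#   POST /list/<id>/  with {"name": ...}     -> adds an item to that list
#   PATCH <any path>  with {"bought": ...}   -> echoes back the new bought flag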
port = 8008
print(f'Server listening on port {port}...')
socketserver.TCPServer.allow_reuse_address = True
httpd = socketserver.TCPServer(('127.0.0.1', port), Handler)
httpd.serve_forever()
| [
"json.dumps",
"json.loads",
"socketserver.TCPServer",
"re.match"
] | [((4565, 4617), 'socketserver.TCPServer', 'socketserver.TCPServer', (["('127.0.0.1', port)", 'Handler'], {}), "(('127.0.0.1', port), Handler)\n", (4587, 4617), False, 'import socketserver\n'), ((3023, 3073), 're.match', 're.match', (['""".*/list/(?P<list_id>\\\\d+)/$"""', 'self.path'], {}), "('.*/list/(?P<list_id>\\\\d+)/$', self.path)\n", (3031, 3073), False, 'import re\n'), ((4420, 4443), 'json.loads', 'json.loads', (['content_str'], {}), '(content_str)\n', (4430, 4443), False, 'import json\n'), ((4127, 4168), 'json.dumps', 'json.dumps', (["{'bought': content['bought']}"], {}), "({'bought': content['bought']})\n", (4137, 4168), False, 'import json\n')] |
from django.db import models
# Create your models here.
#Reception Hall Package class
class ReceptionHallPackage(models.Model):
RH_packageID = models.IntegerField
theme = models.CharField(max_length=20)
price = models.FloatField(max_length=10)
description = models.CharField(max_length=30)
#Reception Hall reservation class
class ReceptionHallBook(models.Model):
RH_reserveID = models.IntegerField
cusId = models.CharField(max_length=10)
theme = models.CharField(max_length=20)
date = models.DateField
timeFrom = models.CharField
timeTo = models.TimeField
| [
"django.db.models.FloatField",
"django.db.models.CharField"
] | [((201, 232), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (217, 232), False, 'from django.db import models\n'), ((256, 288), 'django.db.models.FloatField', 'models.FloatField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (273, 288), False, 'from django.db import models\n'), ((312, 343), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (328, 343), False, 'from django.db import models\n'), ((493, 524), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (509, 524), False, 'from django.db import models\n'), ((548, 579), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (564, 579), False, 'from django.db import models\n')] |
import tempfile
import unittest
import mock
from transformers import AutoTokenizer, PegasusForConditionalGeneration, PegasusConfig
from test.test_loaders import create_test_loader
from test.testing_common_utils import values_tensor
from src.bayesian_summarization.bayesian import BayesianSummarizer
class TestBayesianSummarizer(unittest.TestCase):
@mock.patch("src.bayesian_summarization.bayesian.convert_bayesian_model")
@mock.patch("src.summarization.sum_base.load_model")
def setUp(self, mock_model_loader, mock_mc_model) -> None:
self.batch_size = 2
self.num_beams = 3
self.sequence_length = 10
self.vocab_size = 99
self.bayesian_summarizer = BayesianSummarizer(
model_name_or_path="test_path",
tokenizer_name="google/pegasus-xsum",
text_column="document",
summary_column="summary",
seed=111,
max_source_length=self.sequence_length,
num_beams=self.num_beams,
)
dummy_config = PegasusConfig()
model = PegasusForConditionalGeneration(config=dummy_config)
tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
mock_model_loader.return_value = model, tokenizer
mock_mc_model.return_value = model
self.bayesian_summarizer.init_sum()
def tearDown(self) -> None:
del self.bayesian_summarizer
@mock.patch("transformers.PegasusForConditionalGeneration.generate")
def test_mc_dropout(self, mock_generation):
input_text = "This is the first sentence. This is the second sentence. This is the third sentence."
target_text = "This is a generated summary."
mock_input_ids = [[182, 117, 109, 211, 5577, 107, 182, 117, 109, 1]]
mock_input_ids_tensor = values_tensor(mock_input_ids)
mock_gen_ids = [[182, 117, 114, 3943, 5627, 107, 1]]
mock_gen_ids_tensor = values_tensor(mock_gen_ids)
batch = {
"document": input_text,
}
mock_generation.return_value = {
"sequences": mock_gen_ids_tensor,
}
generations, input_ids = self.bayesian_summarizer.run_mc_dropout(batch, 3)
self.assertEqual(generations[0][0], target_text)
self.assertListEqual(input_ids[0].tolist(), mock_input_ids_tensor[0].tolist())
@mock.patch("transformers.PegasusForConditionalGeneration.generate")
def test_mc_dropout_batch(self, mock_generation):
input_text = "This is the first sentence. This is the second sentence. This is the third sentence."
target_text = "This is a generated summary."
mock_gen_ids = [[182, 117, 114, 3943, 5627, 107, 1]]
mock_gen_ids_tensor = values_tensor(mock_gen_ids)
batch = {
"document": input_text,
}
mock_generation.return_value = {
"sequences": mock_gen_ids_tensor,
}
generations, gen_ids = self.bayesian_summarizer.mc_dropout_batch(
batch=batch,
n=3,
num_articles=0)
self.assertEqual(generations[0][0], target_text)
self.assertEqual(len(gen_ids), 1)
@mock.patch("transformers.PegasusForConditionalGeneration.generate")
def test_generate_bayesian(self, mock_generation):
mock_gen_ids = [[182, 117, 114, 3943, 5627, 107, 1]]
mock_gen_ids_tensor = values_tensor(mock_gen_ids)
args = {
"test_batch_size": 1,
"dataset_name": None,
"dataset_config_name": "",
"max_test_samples": 4,
}
with tempfile.TemporaryDirectory() as tmp_dir:
test_loader = create_test_loader(args=args, tmp_dir=tmp_dir)
mock_generation.return_value = {
"sequences": mock_gen_ids_tensor,
}
generated_sums, target_sums, article_ids, bleuvars = self.bayesian_summarizer.generate_bayesian_summaries(
dataloader=test_loader, n=3)
self.assertEqual(len(generated_sums), args["max_test_samples"])
self.assertEqual(len(target_sums), args["max_test_samples"])
self.assertEqual(len(article_ids), args["max_test_samples"])
self.assertEqual(len(bleuvars), args["max_test_samples"])
del test_loader
@mock.patch("transformers.PegasusForConditionalGeneration.generate")
def test_generate_mc_summaries(self, mock_generation):
mock_gen_ids = [[182, 117, 114, 3943, 5627, 107, 1]]
mock_gen_ids_tensor = values_tensor(mock_gen_ids)
args = {
"test_batch_size": 1,
"dataset_name": None,
"dataset_config_name": "",
"max_test_samples": 4,
}
with tempfile.TemporaryDirectory() as tmp_dir:
test_loader = create_test_loader(args=args, tmp_dir=tmp_dir)
mock_generation.return_value = {
"sequences": mock_gen_ids_tensor,
}
generated_sums = self.bayesian_summarizer.generate_mc_summaries(test_loader, n=3)
self.assertEqual(len(generated_sums), args["max_test_samples"])
self.assertEqual(len(generated_sums[0]), 3)
del test_loader
| [
"tempfile.TemporaryDirectory",
"mock.patch",
"transformers.PegasusForConditionalGeneration",
"test.testing_common_utils.values_tensor",
"transformers.PegasusConfig",
"src.bayesian_summarization.bayesian.BayesianSummarizer",
"transformers.AutoTokenizer.from_pretrained",
"test.test_loaders.create_test_loader"
] | [((357, 429), 'mock.patch', 'mock.patch', (['"""src.bayesian_summarization.bayesian.convert_bayesian_model"""'], {}), "('src.bayesian_summarization.bayesian.convert_bayesian_model')\n", (367, 429), False, 'import mock\n'), ((435, 486), 'mock.patch', 'mock.patch', (['"""src.summarization.sum_base.load_model"""'], {}), "('src.summarization.sum_base.load_model')\n", (445, 486), False, 'import mock\n'), ((1417, 1484), 'mock.patch', 'mock.patch', (['"""transformers.PegasusForConditionalGeneration.generate"""'], {}), "('transformers.PegasusForConditionalGeneration.generate')\n", (1427, 1484), False, 'import mock\n'), ((2349, 2416), 'mock.patch', 'mock.patch', (['"""transformers.PegasusForConditionalGeneration.generate"""'], {}), "('transformers.PegasusForConditionalGeneration.generate')\n", (2359, 2416), False, 'import mock\n'), ((3163, 3230), 'mock.patch', 'mock.patch', (['"""transformers.PegasusForConditionalGeneration.generate"""'], {}), "('transformers.PegasusForConditionalGeneration.generate')\n", (3173, 3230), False, 'import mock\n'), ((4304, 4371), 'mock.patch', 'mock.patch', (['"""transformers.PegasusForConditionalGeneration.generate"""'], {}), "('transformers.PegasusForConditionalGeneration.generate')\n", (4314, 4371), False, 'import mock\n'), ((703, 926), 'src.bayesian_summarization.bayesian.BayesianSummarizer', 'BayesianSummarizer', ([], {'model_name_or_path': '"""test_path"""', 'tokenizer_name': '"""google/pegasus-xsum"""', 'text_column': '"""document"""', 'summary_column': '"""summary"""', 'seed': '(111)', 'max_source_length': 'self.sequence_length', 'num_beams': 'self.num_beams'}), "(model_name_or_path='test_path', tokenizer_name=\n 'google/pegasus-xsum', text_column='document', summary_column='summary',\n seed=111, max_source_length=self.sequence_length, num_beams=self.num_beams)\n", (721, 926), False, 'from src.bayesian_summarization.bayesian import BayesianSummarizer\n'), ((1037, 1052), 'transformers.PegasusConfig', 'PegasusConfig', ([], {}), '()\n', (1050, 1052), False, 'from transformers import AutoTokenizer, PegasusForConditionalGeneration, PegasusConfig\n'), ((1069, 1121), 'transformers.PegasusForConditionalGeneration', 'PegasusForConditionalGeneration', ([], {'config': 'dummy_config'}), '(config=dummy_config)\n', (1100, 1121), False, 'from transformers import AutoTokenizer, PegasusForConditionalGeneration, PegasusConfig\n'), ((1142, 1194), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""google/pegasus-xsum"""'], {}), "('google/pegasus-xsum')\n", (1171, 1194), False, 'from transformers import AutoTokenizer, PegasusForConditionalGeneration, PegasusConfig\n'), ((1803, 1832), 'test.testing_common_utils.values_tensor', 'values_tensor', (['mock_input_ids'], {}), '(mock_input_ids)\n', (1816, 1832), False, 'from test.testing_common_utils import values_tensor\n'), ((1925, 1952), 'test.testing_common_utils.values_tensor', 'values_tensor', (['mock_gen_ids'], {}), '(mock_gen_ids)\n', (1938, 1952), False, 'from test.testing_common_utils import values_tensor\n'), ((2723, 2750), 'test.testing_common_utils.values_tensor', 'values_tensor', (['mock_gen_ids'], {}), '(mock_gen_ids)\n', (2736, 2750), False, 'from test.testing_common_utils import values_tensor\n'), ((3377, 3404), 'test.testing_common_utils.values_tensor', 'values_tensor', (['mock_gen_ids'], {}), '(mock_gen_ids)\n', (3390, 3404), False, 'from test.testing_common_utils import values_tensor\n'), ((4522, 4549), 'test.testing_common_utils.values_tensor', 'values_tensor', (['mock_gen_ids'], 
{}), '(mock_gen_ids)\n', (4535, 4549), False, 'from test.testing_common_utils import values_tensor\n'), ((3587, 3616), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (3614, 3616), False, 'import tempfile\n'), ((3655, 3701), 'test.test_loaders.create_test_loader', 'create_test_loader', ([], {'args': 'args', 'tmp_dir': 'tmp_dir'}), '(args=args, tmp_dir=tmp_dir)\n', (3673, 3701), False, 'from test.test_loaders import create_test_loader\n'), ((4732, 4761), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (4759, 4761), False, 'import tempfile\n'), ((4800, 4846), 'test.test_loaders.create_test_loader', 'create_test_loader', ([], {'args': 'args', 'tmp_dir': 'tmp_dir'}), '(args=args, tmp_dir=tmp_dir)\n', (4818, 4846), False, 'from test.test_loaders import create_test_loader\n')] |
import math
import uuid
from unittest.mock import mock_open, patch
from django.urls import reverse
from ...test.director_test import DirectorTestCase
from .utils import (
find_static_file,
get_page_title,
load_doc_page,
rewrite_markdown_link,
url_to_path,
)
class DocsTestCase(DirectorTestCase):
def test_doc_page_view(self):
self.login()
# We're going to patch the load_doc_page() call to get
# the content I want
header_magic_text = str(uuid.uuid4())
with patch(
"director.apps.docs.views.load_doc_page",
return_value=({}, f"<h1>{header_magic_text}</h1>"),
) as mock_obj:
response = self.client.get(
reverse("docs:doc_page", kwargs={"url": "index"}) + "/", follow=True
)
mock_obj.assert_called()
self.assertEqual(200, response.status_code)
self.assertIn(header_magic_text, response.content.decode("UTF-8"))
def test_search_view(self):
self.login()
# We're going to patch the load_doc_page() call to get
# the content I want
header_magic_text = str(uuid.uuid4())
with patch(
"director.apps.docs.views.load_doc_page",
return_value=({}, f"<h1>{header_magic_text}</h1>"),
) as mock_obj:
with patch(
"director.apps.docs.views.iter_page_names", return_value=["hello"]
) as mock_iter_obj:
response = self.client.get(
reverse("docs:search"), follow=True, data={"q": header_magic_text}
)
self.assertIn(("hello", "Hello", 1), response.context["results"])
mock_obj.assert_called()
mock_iter_obj.assert_called()
class DocsUtilsTestCase(DirectorTestCase):
def test_rewrite_markdown_link(self):
self.assertEqual(
"/check/",
rewrite_markdown_link(
link_url="check.md", base_page_name="check", add_docs_prefix=False
),
)
self.assertEqual(
"/check/",
rewrite_markdown_link(link_url="check", base_page_name="check", add_docs_prefix=False),
)
self.assertEqual(
"/check/",
rewrite_markdown_link(link_url="check", base_page_name="/check", add_docs_prefix=False),
)
self.assertEqual(
"/check/",
rewrite_markdown_link(
link_url="/check", base_page_name="/check", add_docs_prefix=False
),
)
self.assertEqual(
"/check.html",
rewrite_markdown_link(
link_url="check.html", base_page_name="check", add_docs_prefix=False
),
)
self.assertEqual(
"mailto:<EMAIL>",
rewrite_markdown_link(
link_url="mailto:<EMAIL>", base_page_name="", add_docs_prefix=True
),
)
def test_url_to_path(self):
self.assertIsNone(url_to_path("https://director.tjhsst.edu/../../../../etc/passwd"))
self.assertIsNone(url_to_path("https://director.tjhsst.edu/etc/passwd/.git/"))
self.assertIsNotNone(url_to_path("https://director.tjhsst.edu/etc/passwd/"))
def test_load_doc_page(self):
with patch("builtins.open", mock_open(read_data="# hello")):
with patch("os.path.exists", return_value=True):
with patch("os.path.getmtime", return_value=math.inf):
html = load_doc_page("hello.md")
self.assertIn("<h1", html[1])
self.assertIn(">hello", html[1])
def test_find_static_file(self):
self.assertIsNone(find_static_file("/.././../../../../"))
with self.settings(DIRECTOR_DOCS_DIR="/testing"):
self.assertEqual("/testing/hello.html", find_static_file("hello.html"))
def test_get_page_title(self):
self.assertEqual(
"hello this is a test",
get_page_title(
page_name="hello this is a test",
metadata={"title": ["hello", "this", "is", "a", "test"]},
),
)
self.assertEqual(
"Hello This Is A Test", get_page_title(page_name="hello this is a test", metadata={})
)
self.assertEqual("Howto", get_page_title(page_name="/custom-domains/howto", metadata={}))
| [
"unittest.mock.mock_open",
"django.urls.reverse",
"unittest.mock.patch",
"uuid.uuid4"
] | [((500, 512), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (510, 512), False, 'import uuid\n'), ((527, 630), 'unittest.mock.patch', 'patch', (['"""director.apps.docs.views.load_doc_page"""'], {'return_value': "({}, f'<h1>{header_magic_text}</h1>')"}), "('director.apps.docs.views.load_doc_page', return_value=({},\n f'<h1>{header_magic_text}</h1>'))\n", (532, 630), False, 'from unittest.mock import mock_open, patch\n'), ((1154, 1166), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1164, 1166), False, 'import uuid\n'), ((1181, 1284), 'unittest.mock.patch', 'patch', (['"""director.apps.docs.views.load_doc_page"""'], {'return_value': "({}, f'<h1>{header_magic_text}</h1>')"}), "('director.apps.docs.views.load_doc_page', return_value=({},\n f'<h1>{header_magic_text}</h1>'))\n", (1186, 1284), False, 'from unittest.mock import mock_open, patch\n'), ((1346, 1419), 'unittest.mock.patch', 'patch', (['"""director.apps.docs.views.iter_page_names"""'], {'return_value': "['hello']"}), "('director.apps.docs.views.iter_page_names', return_value=['hello'])\n", (1351, 1419), False, 'from unittest.mock import mock_open, patch\n'), ((3318, 3348), 'unittest.mock.mock_open', 'mock_open', ([], {'read_data': '"""# hello"""'}), "(read_data='# hello')\n", (3327, 3348), False, 'from unittest.mock import mock_open, patch\n'), ((3368, 3410), 'unittest.mock.patch', 'patch', (['"""os.path.exists"""'], {'return_value': '(True)'}), "('os.path.exists', return_value=True)\n", (3373, 3410), False, 'from unittest.mock import mock_open, patch\n'), ((731, 780), 'django.urls.reverse', 'reverse', (['"""docs:doc_page"""'], {'kwargs': "{'url': 'index'}"}), "('docs:doc_page', kwargs={'url': 'index'})\n", (738, 780), False, 'from django.urls import reverse\n'), ((1532, 1554), 'django.urls.reverse', 'reverse', (['"""docs:search"""'], {}), "('docs:search')\n", (1539, 1554), False, 'from django.urls import reverse\n'), ((3433, 3481), 'unittest.mock.patch', 'patch', (['"""os.path.getmtime"""'], {'return_value': 'math.inf'}), "('os.path.getmtime', return_value=math.inf)\n", (3438, 3481), False, 'from unittest.mock import mock_open, patch\n')] |
# Handshake
"""
PSTR: String identifier of the Bittorrent protocol V1, 19 bytes.
PSTR_LEN: PSTR length, 1 byte.
RESERVED: 8 bytes, zeroes for example.
PEER_ID_PREFIX: 8 bytes used as a unique ID for the client. "SL" is the
peer_id chosen for this client. The full PEER_ID is 20 bytes,
the rest of it is built using random numbers.
The full handshake message consists of:
<PSTR_LEN><PSTR><RESERVED><INFO_HASH><PEER_ID>
The full handshake consists of 68 bytes.
"""
from stilio.crawler.bittorrent.bencoding import encode
PSTR = "BitTorrent protocol"
PSTR_LEN = chr(len(PSTR))
RESERVED = "\x00\x00\x00\x00\x00\x10\x00\x01"
PEER_ID_PREFIX = "-SL0001-"
BT_PROTOCOL_PREFIX = bytes(PSTR_LEN + PSTR + RESERVED, encoding="utf-8")
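# Illustrative sketch only: a complete handshake is the 28-byte prefix above
# followed by a 20-byte info hash and a 20-byte peer id. The info hash and the
# peer id suffix used here are hypothetical placeholder values.
_example_info_hash = bytes(20)
_example_peer_id = (PEER_ID_PREFIX + "0" * 12).encode("utf-8")
_example_handshake = BT_PROTOCOL_PREFIX + _example_info_hash + _example_peer_id
assert len(_example_handshake) == 68  # 1 + 19 + 8 + 20 + 20 bytes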
# Extension
ID_EXTENDED_MESSAGE = 20
ID_EXTENDED_HANDSHAKE = 0
EXTENDED_HANDSHAKE_MESSAGE = bytes(
[ID_EXTENDED_MESSAGE, ID_EXTENDED_HANDSHAKE]
) + encode({b"m": {b"ut_metadata": 1}})
| [
"stilio.crawler.bittorrent.bencoding.encode"
] | [((910, 945), 'stilio.crawler.bittorrent.bencoding.encode', 'encode', (["{b'm': {b'ut_metadata': 1}}"], {}), "({b'm': {b'ut_metadata': 1}})\n", (916, 945), False, 'from stilio.crawler.bittorrent.bencoding import encode\n')] |
from rest_framework import routers
from . import viewset
router = routers.SimpleRouter()
router.register('AcaUser', viewset.AcaUserViewSet)
router.register('AcaEvent', viewset.AcaEventViewSet)
router.register('AcaEventNoPage', viewset.AcaEventViewSetNopage)
router.register('AcaResultset', viewset.AcaResultsetViewSet)
router.register('AcaRacegroup', viewset.AcaRacegroupViewSet)
router.register('AcaResult', viewset.AcaResultViewSet)
router.register('AcaResultNoPage', viewset.AcaResultNoPageViewSet)
router.register('AcaPointscompetition', viewset.AcaPointscompetitionViewSet)
router.register('AcaPointscompetitionraceresult', viewset.AcaPointscompetitionraceresultViewSet)
router.register('AcaPointscompetitionresult', viewset.AcaPointscompetitionresultViewSet)
| [
"rest_framework.routers.SimpleRouter"
] | [((67, 89), 'rest_framework.routers.SimpleRouter', 'routers.SimpleRouter', ([], {}), '()\n', (87, 89), False, 'from rest_framework import routers\n')] |
import numpy as np
from dqn import *
layers = [12, 64, 32, 4]
model = LinearDQN(layers)
input_arr = np.random.uniform(size=(3, 12))
print(model.forward(input_arr))
print(model.update(input_arr).shape)
| [
"numpy.random.uniform"
] | [((103, 134), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(3, 12)'}), '(size=(3, 12))\n', (120, 134), True, 'import numpy as np\n')] |
import numpy as np
import cv2
# number of corners
VERT = 5
HORI = 7
ROWS = VERT + 1
COLS = HORI + 1
BLOCKSIZE = 100
MARGIN = 25
HEIGHT = ROWS*BLOCKSIZE + 2*MARGIN
WIDTH = COLS*BLOCKSIZE + 2*MARGIN
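# e.g. with VERT=5 and HORI=7: the board has one more square than inner corners
# per axis, so ROWS=6, COLS=8 and the image is 650 x 850 px
# (6*100 + 2*25 high by 8*100 + 2*25 wide)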
img = np.ones((HEIGHT, WIDTH), np.uint8)*255
for r in range(ROWS):
for c in range(COLS):
if (r+c) % 2 == 1:
x = MARGIN + BLOCKSIZE*c
y = MARGIN + BLOCKSIZE*r
img[y:y+BLOCKSIZE, x:x+BLOCKSIZE] = 0
cv2.imwrite('chessboard_{0}x{1}.png'.format(VERT, HORI), img)
| [
"numpy.ones"
] | [((207, 241), 'numpy.ones', 'np.ones', (['(HEIGHT, WIDTH)', 'np.uint8'], {}), '((HEIGHT, WIDTH), np.uint8)\n', (214, 241), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from functools import reduce
import math
__all__ = ['skgrcnn55', 'skgrcnn109', 'grcnn109', 'grcnn55']
class SKConv(nn.Module):
def __init__(self,in_channels,out_channels,stride=1,M=2,r=16,L=32, groups=32):
super(SKConv,self).__init__()
d=max(in_channels//r,L)
self.M=M
self.out_channels=out_channels
self.conv=nn.ModuleList()
for i in range(M):
conv1 = nn.Conv2d(in_channels,out_channels,3,stride,padding=1+i,dilation=1+i,groups=groups,bias=False)
init.kaiming_normal_(conv1.weight)
self.conv.append(nn.Sequential(conv1,
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)))
self.global_pool=nn.AdaptiveAvgPool2d(1)
conv_fc = nn.Conv2d(out_channels,d,1,bias=False)
init.normal_(conv_fc.weight, std=0.01)
self.fc1=nn.Sequential(conv_fc,
nn.BatchNorm2d(d),
nn.ReLU(inplace=True))
self.fc2=nn.Conv2d(d,out_channels*M,1,1,bias=False)
init.normal_(self.fc2.weight, std=0.01)
self.softmax=nn.Softmax(dim=1)
def forward(self, input):
batch_size=input.size(0)
output=[]
for i,conv in enumerate(self.conv):
output.append(conv(input))
U=reduce(lambda x,y:x+y,output)
s=self.global_pool(U)
z=self.fc1(s)
a_b=self.fc2(z)
a_b=a_b.reshape(batch_size,self.M,self.out_channels,-1)
a_b=self.softmax(a_b)
a_b=list(a_b.chunk(self.M,dim=1))
a_b=list(map(lambda x:x.reshape(batch_size,self.out_channels,1,1),a_b))
V=list(map(lambda x,y:x*y,output,a_b))
V=reduce(lambda x,y:x+y,V)
return V
class GRCL(nn.Module):
def __init__(self, inplanes, planes, downsample=True, iter = 3, SKconv=True, expansion=2):
super(GRCL, self).__init__()
self.iter = iter
self.expansion = expansion
# feed-forward part
self.add_module('bn_f', nn.BatchNorm2d(inplanes))
self.add_module('relu_f', nn.ReLU(inplace=True))
conv_f = nn.Conv2d(inplanes, int(planes* self.expansion), kernel_size=3, stride=1, padding=1, bias=False, groups=32)
init.kaiming_normal_(conv_f.weight)
self.add_module('conv_f', conv_f)
self.add_module('bn_g_f', nn.BatchNorm2d(inplanes))
self.add_module('relu_g_f', nn.ReLU(inplace=True))
conv_g_f = nn.Conv2d(inplanes, int(planes* self.expansion), kernel_size=1, stride=1, padding=0, bias=True, groups=32)
init.normal_(conv_g_f.weight, std=0.01)
self.add_module('conv_g_f', conv_g_f)
self.conv_g_r = nn.Conv2d(int(planes* self.expansion), int(planes* self.expansion), kernel_size=1, stride=1, padding=0, bias=False, groups=32)
self.add_module('sig', nn.Sigmoid())
# recurrent part
for i in range(0, self.iter):
layers = []
layers_g_bn = []
layers.append(nn.BatchNorm2d(planes*self.expansion))
layers.append(nn.ReLU(inplace=True))
conv_1 = nn.Conv2d(int(planes*self.expansion), planes, kernel_size=1, stride=1, padding=0, bias=False)
init.kaiming_normal_(conv_1.weight)
layers.append(conv_1)
layers.append(nn.BatchNorm2d(planes))
layers.append(nn.ReLU(inplace=True))
if SKconv:
layers.append(SKConv(planes, planes))
else:
layers.append(nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False))
layers.append(nn.BatchNorm2d(planes))
layers.append(nn.ReLU(inplace=True))
conv_2 = nn.Conv2d(planes, int(planes*self.expansion), kernel_size=1, stride=1, padding=0, bias=False)
init.kaiming_normal_(conv_2.weight)
layers.append(conv_2)
layers_g_bn.append(nn.BatchNorm2d(int(planes*self.expansion)))
layers_g_bn.append(nn.ReLU(inplace=True))
self.add_module('iter_'+str(i+1), nn.Sequential(*layers))
self.add_module('iter_g_'+str(i+1), nn.Sequential(*layers_g_bn))
self.downsample = downsample
if self.downsample:
self.add_module('d_bn', nn.BatchNorm2d(planes * self.expansion))
self.add_module('d_relu', nn.ReLU(inplace=True))
d_conv = nn.Conv2d(int(planes* self.expansion), int(planes* self.expansion), kernel_size=1, stride=1, padding=0, bias=False)
init.kaiming_normal_(d_conv.weight)
self.add_module('d_conv', d_conv)
self.add_module('d_ave', nn.AvgPool2d((2, 2), stride=2))
self.add_module('d_bn_1', nn.BatchNorm2d(planes * self.expansion))
self.add_module('d_relu_1', nn.ReLU(inplace=True))
d_conv_1 = nn.Conv2d(int(planes* self.expansion), planes, kernel_size=1, stride=1, padding=0,
bias=False)
init.kaiming_normal_(d_conv_1.weight)
self.add_module('d_conv_1', d_conv_1)
self.add_module('d_bn_3', nn.BatchNorm2d(planes))
self.add_module('d_relu_3', nn.ReLU(inplace=True))
if SKconv:
d_conv_3 = SKConv(planes, planes, stride=2)
self.add_module('d_conv_3', d_conv_3)
else:
d_conv_3 = nn.Conv2d(planes, planes, kernel_size=3, stride=2, padding=1, bias=False)
init.kaiming_normal_(d_conv_3.weight)
self.add_module('d_conv_3', d_conv_3)
d_conv_1e = nn.Conv2d(planes, int(planes * self.expansion), kernel_size=1, stride=1, padding=0, bias=False)
init.kaiming_normal_(d_conv_1e.weight)
self.add_module('d_conv_1e', d_conv_1e)
def forward(self, x):
# feed-forward
x_bn = self.bn_f(x)
x_act = self.relu_f(x_bn)
x_s = self.conv_f(x_act)
x_g_bn = self.bn_g_f(x)
x_g_act = self.relu_g_f(x_g_bn)
x_g_s = self.conv_g_f(x_g_act)
# recurrent
for i in range(0, self.iter):
x_g_r = self.conv_g_r(self.__dict__['_modules']["iter_g_%s" % str(i+1)](x_s))
x_s = self.__dict__['_modules']["iter_%s" % str(i+1)](x_s) * torch.sigmoid(x_g_r + x_g_s) + x_s
if self.downsample:
x_s_1 = self.d_conv(self.d_ave(self.d_relu(self.d_bn(x_s))))
x_s_2 = self.d_conv_1e(self.d_conv_3(self.d_relu_3(self.d_bn_3(self.d_conv_1(self.d_relu_1(self.d_bn_1(x_s)))))))
x_s = x_s_1 + x_s_2
return x_s
class GRCNN(nn.Module):
def __init__(self, iters, maps, SKconv, expansion, num_classes):
""" Args:
iters:iterations.
num_classes: number of classes
"""
super(GRCNN, self).__init__()
self.iters = iters
self.maps = maps
self.num_classes = num_classes
self.expansion = expansion
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
init.kaiming_normal_(self.conv1.weight)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1,
bias=False)
init.kaiming_normal_(self.conv2.weight)
self.layer1 = GRCL(64, self.maps[0], True, self.iters[0], SKconv, self.expansion)
self.layer2 = GRCL(self.maps[0] * self.expansion, self.maps[1], True, self.iters[1], SKconv, self.expansion)
self.layer3 = GRCL(self.maps[1] * self.expansion, self.maps[2], True, self.iters[2], SKconv, self.expansion)
self.layer4 = GRCL(self.maps[2] * self.expansion, self.maps[3], False, self.iters[3], SKconv, self.expansion)
self.lastact = nn.Sequential(nn.BatchNorm2d(self.maps[3]*self.expansion), nn.ReLU(inplace=True))
self.avgpool = nn.AvgPool2d(7)
self.classifier = nn.Linear(self.maps[3] * self.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
init.ones_(m.weight)
init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
init.kaiming_normal_(m.weight)
init.zeros_(m.bias)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.conv2(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.lastact(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return self.classifier(x)
def skgrcnn55(num_classes=1000):
"""
Args:
num_classes (uint): number of classes
"""
model = GRCNN([3, 3, 4, 3], [128, 256, 512, 1024], SKconv=True, expansion=2, num_classes=num_classes)
return model
def skgrcnn109(num_classes=1000):
"""
Args:
num_classes (uint): number of classes
"""
model = GRCNN([3, 3, 22, 3], [128, 256, 512, 1024], SKconv=True, expansion=2, num_classes=num_classes)
return model
def grcnn55(num_classes=1000):
"""
Args:
num_classes (uint): number of classes
"""
model = GRCNN([3, 3, 4, 3], [64, 128, 256, 512], SKconv=False, expansion=4, num_classes=num_classes)
return model
def grcnn109(num_classes=1000):
"""
Args:
num_classes (uint): number of classes
"""
model = GRCNN([3, 3, 22, 3], [64, 128, 256, 512], SKconv=False, expansion=4, num_classes=num_classes)
return model
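if __name__ == "__main__":
  # Minimal usage sketch: assumes a 224x224 RGB input, which matches the
  # stride-32 downsampling and the 7x7 average pooling used above.
  net = grcnn55(num_classes=10)
  dummy = torch.randn(1, 3, 224, 224)
  logits = net(dummy)
  print(logits.shape)  # expected: torch.Size([1, 10])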
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Sigmoid",
"torch.nn.Softmax",
"torch.nn.init.ones_",
"torch.nn.ModuleList",
"functools.reduce",
"torch.nn.Sequential",
"torch.sigmoid",
"torch.nn.init.kaiming_normal_",
"torch.nn.init.zeros_",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.init.normal_"
] | [((453, 468), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (466, 468), True, 'import torch.nn as nn\n'), ((875, 898), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (895, 898), True, 'import torch.nn as nn\n'), ((918, 959), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_channels', 'd', '(1)'], {'bias': '(False)'}), '(out_channels, d, 1, bias=False)\n', (927, 959), True, 'import torch.nn as nn\n'), ((965, 1003), 'torch.nn.init.normal_', 'init.normal_', (['conv_fc.weight'], {'std': '(0.01)'}), '(conv_fc.weight, std=0.01)\n', (977, 1003), False, 'from torch.nn import init\n'), ((1168, 1216), 'torch.nn.Conv2d', 'nn.Conv2d', (['d', '(out_channels * M)', '(1)', '(1)'], {'bias': '(False)'}), '(d, out_channels * M, 1, 1, bias=False)\n', (1177, 1216), True, 'import torch.nn as nn\n'), ((1219, 1258), 'torch.nn.init.normal_', 'init.normal_', (['self.fc2.weight'], {'std': '(0.01)'}), '(self.fc2.weight, std=0.01)\n', (1231, 1258), False, 'from torch.nn import init\n'), ((1280, 1297), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (1290, 1297), True, 'import torch.nn as nn\n'), ((1474, 1508), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', 'output'], {}), '(lambda x, y: x + y, output)\n', (1480, 1508), False, 'from functools import reduce\n'), ((1860, 1889), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', 'V'], {}), '(lambda x, y: x + y, V)\n', (1866, 1889), False, 'from functools import reduce\n'), ((2362, 2397), 'torch.nn.init.kaiming_normal_', 'init.kaiming_normal_', (['conv_f.weight'], {}), '(conv_f.weight)\n', (2382, 2397), False, 'from torch.nn import init\n'), ((2678, 2717), 'torch.nn.init.normal_', 'init.normal_', (['conv_g_f.weight'], {'std': '(0.01)'}), '(conv_g_f.weight, std=0.01)\n', (2690, 2717), False, 'from torch.nn import init\n'), ((6626, 6690), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n', (6635, 6690), True, 'import torch.nn as nn\n'), ((6727, 6766), 'torch.nn.init.kaiming_normal_', 'init.kaiming_normal_', (['self.conv1.weight'], {}), '(self.conv1.weight)\n', (6747, 6766), False, 'from torch.nn import init\n'), ((6783, 6801), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (6797, 6801), True, 'import torch.nn as nn\n'), ((6818, 6839), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6825, 6839), True, 'import torch.nn as nn\n'), ((6859, 6907), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(kernel_size=3, stride=2, padding=1)\n', (6871, 6907), True, 'import torch.nn as nn\n'), ((6926, 6991), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(64, 64, kernel_size=3, stride=1, padding=1, bias=False)\n', (6935, 6991), True, 'import torch.nn as nn\n'), ((7028, 7067), 'torch.nn.init.kaiming_normal_', 'init.kaiming_normal_', (['self.conv2.weight'], {}), '(self.conv2.weight)\n', (7048, 7067), False, 'from torch.nn import init\n'), ((7620, 7635), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['(7)'], {}), '(7)\n', (7632, 7635), True, 'import torch.nn as nn\n'), ((7658, 7711), 'torch.nn.Linear', 'nn.Linear', (['(self.maps[3] * self.expansion)', 'num_classes'], {}), '(self.maps[3] * self.expansion, num_classes)\n', (7667, 7711), True, 'import torch.nn as nn\n'), ((518, 627), 'torch.nn.Conv2d', 
'nn.Conv2d', (['in_channels', 'out_channels', '(3)', 'stride'], {'padding': '(1 + i)', 'dilation': '(1 + i)', 'groups': 'groups', 'bias': '(False)'}), '(in_channels, out_channels, 3, stride, padding=1 + i, dilation=1 +\n i, groups=groups, bias=False)\n', (527, 627), True, 'import torch.nn as nn\n'), ((625, 659), 'torch.nn.init.kaiming_normal_', 'init.kaiming_normal_', (['conv1.weight'], {}), '(conv1.weight)\n', (645, 659), False, 'from torch.nn import init\n'), ((1075, 1092), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['d'], {}), '(d)\n', (1089, 1092), True, 'import torch.nn as nn\n'), ((1125, 1146), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1132, 1146), True, 'import torch.nn as nn\n'), ((2158, 2182), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['inplanes'], {}), '(inplanes)\n', (2172, 2182), True, 'import torch.nn as nn\n'), ((2214, 2235), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2221, 2235), True, 'import torch.nn as nn\n'), ((2471, 2495), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['inplanes'], {}), '(inplanes)\n', (2485, 2495), True, 'import torch.nn as nn\n'), ((2529, 2550), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2536, 2550), True, 'import torch.nn as nn\n'), ((2934, 2946), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2944, 2946), True, 'import torch.nn as nn\n'), ((3261, 3296), 'torch.nn.init.kaiming_normal_', 'init.kaiming_normal_', (['conv_1.weight'], {}), '(conv_1.weight)\n', (3281, 3296), False, 'from torch.nn import init\n'), ((3785, 3820), 'torch.nn.init.kaiming_normal_', 'init.kaiming_normal_', (['conv_2.weight'], {}), '(conv_2.weight)\n', (3805, 3820), False, 'from torch.nn import init\n'), ((4424, 4459), 'torch.nn.init.kaiming_normal_', 'init.kaiming_normal_', (['d_conv.weight'], {}), '(d_conv.weight)\n', (4444, 4459), False, 'from torch.nn import init\n'), ((4827, 4864), 'torch.nn.init.kaiming_normal_', 'init.kaiming_normal_', (['d_conv_1.weight'], {}), '(d_conv_1.weight)\n', (4847, 4864), False, 'from torch.nn import init\n'), ((5476, 5514), 'torch.nn.init.kaiming_normal_', 'init.kaiming_normal_', (['d_conv_1e.weight'], {}), '(d_conv_1e.weight)\n', (5496, 5514), False, 'from torch.nn import init\n'), ((7533, 7578), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(self.maps[3] * self.expansion)'], {}), '(self.maps[3] * self.expansion)\n', (7547, 7578), True, 'import torch.nn as nn\n'), ((7578, 7599), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7585, 7599), True, 'import torch.nn as nn\n'), ((3067, 3106), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(planes * self.expansion)'], {}), '(planes * self.expansion)\n', (3081, 3106), True, 'import torch.nn as nn\n'), ((3125, 3146), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3132, 3146), True, 'import torch.nn as nn\n'), ((3344, 3366), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (3358, 3366), True, 'import torch.nn as nn\n'), ((3387, 3408), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3394, 3408), True, 'import torch.nn as nn\n'), ((3941, 3962), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3948, 3962), True, 'import torch.nn as nn\n'), ((4005, 4027), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (4018, 4027), True, 'import torch.nn as nn\n'), ((4070, 4097), 'torch.nn.Sequential', 'nn.Sequential', (['*layers_g_bn'], {}), 
'(*layers_g_bn)\n', (4083, 4097), True, 'import torch.nn as nn\n'), ((4188, 4227), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(planes * self.expansion)'], {}), '(planes * self.expansion)\n', (4202, 4227), True, 'import torch.nn as nn\n'), ((4262, 4283), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4269, 4283), True, 'import torch.nn as nn\n'), ((4533, 4563), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['(2, 2)'], {'stride': '(2)'}), '((2, 2), stride=2)\n', (4545, 4563), True, 'import torch.nn as nn\n'), ((4601, 4640), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(planes * self.expansion)'], {}), '(planes * self.expansion)\n', (4615, 4640), True, 'import torch.nn as nn\n'), ((4677, 4698), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4684, 4698), True, 'import torch.nn as nn\n'), ((4944, 4966), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (4958, 4966), True, 'import torch.nn as nn\n'), ((5003, 5024), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5010, 5024), True, 'import torch.nn as nn\n'), ((5185, 5258), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(planes, planes, kernel_size=3, stride=2, padding=1, bias=False)\n', (5194, 5258), True, 'import torch.nn as nn\n'), ((5268, 5305), 'torch.nn.init.kaiming_normal_', 'init.kaiming_normal_', (['d_conv_3.weight'], {}), '(d_conv_3.weight)\n', (5288, 5305), False, 'from torch.nn import init\n'), ((753, 781), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (767, 781), True, 'import torch.nn as nn\n'), ((826, 847), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (833, 847), True, 'import torch.nn as nn\n'), ((3504, 3577), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n', (3513, 3577), True, 'import torch.nn as nn\n'), ((3600, 3622), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (3614, 3622), True, 'import torch.nn as nn\n'), ((3645, 3666), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3652, 3666), True, 'import torch.nn as nn\n'), ((5998, 6026), 'torch.sigmoid', 'torch.sigmoid', (['(x_g_r + x_g_s)'], {}), '(x_g_r + x_g_s)\n', (6011, 6026), False, 'import torch\n'), ((7819, 7838), 'torch.nn.init.zeros_', 'init.zeros_', (['m.bias'], {}), '(m.bias)\n', (7830, 7838), False, 'from torch.nn import init\n'), ((7889, 7909), 'torch.nn.init.ones_', 'init.ones_', (['m.weight'], {}), '(m.weight)\n', (7899, 7909), False, 'from torch.nn import init\n'), ((7918, 7937), 'torch.nn.init.zeros_', 'init.zeros_', (['m.bias'], {}), '(m.bias)\n', (7929, 7937), False, 'from torch.nn import init\n'), ((7983, 8013), 'torch.nn.init.kaiming_normal_', 'init.kaiming_normal_', (['m.weight'], {}), '(m.weight)\n', (8003, 8013), False, 'from torch.nn import init\n'), ((8022, 8041), 'torch.nn.init.zeros_', 'init.zeros_', (['m.bias'], {}), '(m.bias)\n', (8033, 8041), False, 'from torch.nn import init\n')] |
import re
import time
import requests
from invoke import task
from os import makedirs
from os.path import basename, join
from pprint import pprint
from hoststats.client import HostStats
from tasks.util.env import (
RESULTS_DIR,
)
from tasks.util.faasm import (
get_knative_headers,
get_faasm_worker_pods,
get_faasm_invoke_host_port,
)
from tasks.util.openmpi import (
NATIVE_HOSTFILE,
get_native_mpi_namespace,
get_native_mpi_pods,
run_kubectl_cmd,
)
from tasks.lammps.env import (
DOCKER_LAMMPS_BINARY,
DOCKER_LAMMPS_DIR,
LAMMPS_FAASM_USER,
LAMMPS_FAASM_FUNC,
get_faasm_benchmark,
)
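# NOTE: NUM_PROCS is referenced below when neither `nprocs` nor `procrange` is
# given, but it is not defined or imported in this file. The values here are an
# assumed default sweep, not taken from the original source; adjust as needed.
NUM_PROCS = [1, 2, 4, 8, 16]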
def _init_csv_file(csv_name):
result_dir = join(RESULTS_DIR, "lammps")
makedirs(result_dir, exist_ok=True)
result_file = join(result_dir, csv_name)
makedirs(RESULTS_DIR, exist_ok=True)
with open(result_file, "w") as out_file:
out_file.write("WorldSize,Run,Reported,Actual\n")
return result_file
def _process_lammps_result(
lammps_output, result_file, num_procs, run_num, actual_time
):
print("Processing lammps output: \n{}\n".format(lammps_output))
reported_time = re.findall("Total wall time: ([0-9:]*)", lammps_output)
if len(reported_time) != 1:
print(
"Got {} matches for reported time, expected 1 from: \n{}".format(
len(reported_time), lammps_output
)
)
exit(1)
reported_time = reported_time[0].split(":")
reported_time = (
int(reported_time[0]) * 3600
+ int(reported_time[1]) * 60
+ int(reported_time[2])
)
with open(result_file, "a") as out_file:
out_file.write(
"{},{},{},{:.2f}\n".format(
num_procs, run_num, reported_time, actual_time
)
)
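# Illustrative example of the parsing above (not from the original logs): a LAMMPS
# run whose output contains "Total wall time: 0:01:23" is recorded as 83 seconds,
# next to the wall-clock time measured by the caller.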
@task
def faasm(ctx, bench, repeats=1, nprocs=None, procrange=None):
"""
Run LAMMPS experiment on Faasm
"""
_bench = get_faasm_benchmark(bench)
result_file = _init_csv_file(
"lammps_wasm_{}.csv".format(_bench["out_file"])
)
if nprocs:
num_procs = [nprocs]
elif procrange:
num_procs = range(1, int(procrange) + 1)
else:
num_procs = NUM_PROCS
host, port = get_faasm_invoke_host_port()
pod_names = get_faasm_worker_pods()
stats = HostStats(
pod_names,
kubectl=True,
kubectl_container="user-container",
kubectl_ns="faasm",
)
for np in num_procs:
print("Running on Faasm with {} MPI processes".format(np))
for run_num in range(repeats):
stats_csv = join(
RESULTS_DIR,
"lammps",
"hoststats_wasm_{}_{}_{}.csv".format(
_bench["out_file"], np, run_num
),
)
start = time.time()
stats.start_collection()
file_name = basename(_bench["data"][0])
cmdline = "-in faasm://lammps-data/{}".format(file_name)
url = "http://{}:{}".format(host, port)
msg = {
"user": LAMMPS_FAASM_USER,
"function": LAMMPS_FAASM_FUNC,
"cmdline": cmdline,
"mpi_world_size": int(np),
"async": True,
}
print("Posting to {} msg:".format(msg, url))
pprint(msg)
# Post asynch request
knative_headers = get_knative_headers()
response = requests.post(
url, json=msg, headers=knative_headers, timeout=None
)
# Get the async message id
            if response.status_code != 200:
                print(
                    "Initial request failed: {}:\n{}".format(
                        response.status_code, response.text
                    )
                )
                raise RuntimeError("Initial invocation request failed")
print("Response: {}".format(response.text))
msg_id = int(response.text.strip())
# Start polling for the result
print("Polling message {}".format(msg_id))
while True:
interval = 2
time.sleep(interval)
status_msg = {
"user": LAMMPS_FAASM_USER,
"function": LAMMPS_FAASM_FUNC,
"status": True,
"id": msg_id,
}
response = requests.post(
url,
json=status_msg,
headers=knative_headers,
)
print(response.text)
if response.text.startswith("SUCCESS"):
actual_time = time.time() - start
break
elif response.text.startswith("RUNNING"):
continue
elif response.text.startswith("FAILED"):
raise RuntimeError("Call failed")
elif not response.text:
raise RuntimeError("Empty status response")
else:
raise RuntimeError(
"Unexpected status response: {}".format(response.text)
)
stats.stop_and_write_to_csv(stats_csv)
_process_lammps_result(
response.text, result_file, np, run_num, actual_time
)
print("Results written to {}".format(result_file))
@task
def native(ctx, bench, repeats=1, nprocs=None, procrange=None):
"""
Run LAMMPS experiment on OpenMPI
"""
_bench = get_faasm_benchmark(bench)
result_file = _init_csv_file(
"lammps_native_{}.csv".format(_bench["out_file"])
)
if nprocs:
num_procs = [nprocs]
elif procrange:
num_procs = range(1, int(procrange) + 1)
else:
num_procs = NUM_PROCS
namespace = get_native_mpi_namespace("lammps")
pod_names, _ = get_native_mpi_pods("lammps")
master_pod = pod_names[0]
stats = HostStats(pod_names, kubectl=True, kubectl_ns=namespace)
for np in num_procs:
print("Running natively with {} MPI processes".format(np))
print("Chosen pod {} as master".format(master_pod))
for run_num in range(repeats):
stats_csv = join(
RESULTS_DIR,
"lammps",
"hoststats_native_{}_{}_{}.csv".format(
_bench["out_file"], np, run_num
),
)
start = time.time()
stats.start_collection()
native_cmdline = "-in {}/{}.faasm.native".format(
DOCKER_LAMMPS_DIR, _bench["data"][0]
)
mpirun_cmd = [
"mpirun",
"-np {}".format(np),
"-hostfile {}".format(NATIVE_HOSTFILE),
DOCKER_LAMMPS_BINARY,
native_cmdline,
]
mpirun_cmd = " ".join(mpirun_cmd)
exec_cmd = [
"exec",
master_pod,
"--",
"su mpirun -c '{}'".format(mpirun_cmd),
]
exec_output = run_kubectl_cmd("lammps", " ".join(exec_cmd))
print(exec_output)
end = time.time()
actual_time = end - start
stats.stop_and_write_to_csv(stats_csv)
_process_lammps_result(
exec_output, result_file, np, run_num, actual_time
)
| [
"requests.post",
"tasks.util.faasm.get_knative_headers",
"os.makedirs",
"tasks.lammps.env.get_faasm_benchmark",
"tasks.util.faasm.get_faasm_invoke_host_port",
"os.path.join",
"time.sleep",
"tasks.util.openmpi.get_native_mpi_pods",
"tasks.util.faasm.get_faasm_worker_pods",
"os.path.basename",
"hoststats.client.HostStats",
"re.findall",
"tasks.util.openmpi.get_native_mpi_namespace",
"time.time",
"pprint.pprint"
] | [((687, 714), 'os.path.join', 'join', (['RESULTS_DIR', '"""lammps"""'], {}), "(RESULTS_DIR, 'lammps')\n", (691, 714), False, 'from os.path import basename, join\n'), ((719, 754), 'os.makedirs', 'makedirs', (['result_dir'], {'exist_ok': '(True)'}), '(result_dir, exist_ok=True)\n', (727, 754), False, 'from os import makedirs\n'), ((774, 800), 'os.path.join', 'join', (['result_dir', 'csv_name'], {}), '(result_dir, csv_name)\n', (778, 800), False, 'from os.path import basename, join\n'), ((805, 841), 'os.makedirs', 'makedirs', (['RESULTS_DIR'], {'exist_ok': '(True)'}), '(RESULTS_DIR, exist_ok=True)\n', (813, 841), False, 'from os import makedirs\n'), ((1155, 1210), 're.findall', 're.findall', (['"""Total wall time: ([0-9:]*)"""', 'lammps_output'], {}), "('Total wall time: ([0-9:]*)', lammps_output)\n", (1165, 1210), False, 'import re\n'), ((1942, 1968), 'tasks.lammps.env.get_faasm_benchmark', 'get_faasm_benchmark', (['bench'], {}), '(bench)\n', (1961, 1968), False, 'from tasks.lammps.env import DOCKER_LAMMPS_BINARY, DOCKER_LAMMPS_DIR, LAMMPS_FAASM_USER, LAMMPS_FAASM_FUNC, get_faasm_benchmark\n'), ((2238, 2266), 'tasks.util.faasm.get_faasm_invoke_host_port', 'get_faasm_invoke_host_port', ([], {}), '()\n', (2264, 2266), False, 'from tasks.util.faasm import get_knative_headers, get_faasm_worker_pods, get_faasm_invoke_host_port\n'), ((2284, 2307), 'tasks.util.faasm.get_faasm_worker_pods', 'get_faasm_worker_pods', ([], {}), '()\n', (2305, 2307), False, 'from tasks.util.faasm import get_knative_headers, get_faasm_worker_pods, get_faasm_invoke_host_port\n'), ((2320, 2414), 'hoststats.client.HostStats', 'HostStats', (['pod_names'], {'kubectl': '(True)', 'kubectl_container': '"""user-container"""', 'kubectl_ns': '"""faasm"""'}), "(pod_names, kubectl=True, kubectl_container='user-container',\n kubectl_ns='faasm')\n", (2329, 2414), False, 'from hoststats.client import HostStats\n'), ((5525, 5551), 'tasks.lammps.env.get_faasm_benchmark', 'get_faasm_benchmark', (['bench'], {}), '(bench)\n', (5544, 5551), False, 'from tasks.lammps.env import DOCKER_LAMMPS_BINARY, DOCKER_LAMMPS_DIR, LAMMPS_FAASM_USER, LAMMPS_FAASM_FUNC, get_faasm_benchmark\n'), ((5822, 5856), 'tasks.util.openmpi.get_native_mpi_namespace', 'get_native_mpi_namespace', (['"""lammps"""'], {}), "('lammps')\n", (5846, 5856), False, 'from tasks.util.openmpi import NATIVE_HOSTFILE, get_native_mpi_namespace, get_native_mpi_pods, run_kubectl_cmd\n'), ((5876, 5905), 'tasks.util.openmpi.get_native_mpi_pods', 'get_native_mpi_pods', (['"""lammps"""'], {}), "('lammps')\n", (5895, 5905), False, 'from tasks.util.openmpi import NATIVE_HOSTFILE, get_native_mpi_namespace, get_native_mpi_pods, run_kubectl_cmd\n'), ((5948, 6004), 'hoststats.client.HostStats', 'HostStats', (['pod_names'], {'kubectl': '(True)', 'kubectl_ns': 'namespace'}), '(pod_names, kubectl=True, kubectl_ns=namespace)\n', (5957, 6004), False, 'from hoststats.client import HostStats\n'), ((2828, 2839), 'time.time', 'time.time', ([], {}), '()\n', (2837, 2839), False, 'import time\n'), ((2902, 2929), 'os.path.basename', 'basename', (["_bench['data'][0]"], {}), "(_bench['data'][0])\n", (2910, 2929), False, 'from os.path import basename, join\n'), ((3354, 3365), 'pprint.pprint', 'pprint', (['msg'], {}), '(msg)\n', (3360, 3365), False, 'from pprint import pprint\n'), ((3431, 3452), 'tasks.util.faasm.get_knative_headers', 'get_knative_headers', ([], {}), '()\n', (3450, 3452), False, 'from tasks.util.faasm import get_knative_headers, get_faasm_worker_pods, get_faasm_invoke_host_port\n'), ((3476, 
3543), 'requests.post', 'requests.post', (['url'], {'json': 'msg', 'headers': 'knative_headers', 'timeout': 'None'}), '(url, json=msg, headers=knative_headers, timeout=None)\n', (3489, 3543), False, 'import requests\n'), ((6445, 6456), 'time.time', 'time.time', ([], {}), '()\n', (6454, 6456), False, 'import time\n'), ((7192, 7203), 'time.time', 'time.time', ([], {}), '()\n', (7201, 7203), False, 'import time\n'), ((4114, 4134), 'time.sleep', 'time.sleep', (['interval'], {}), '(interval)\n', (4124, 4134), False, 'import time\n'), ((4380, 4440), 'requests.post', 'requests.post', (['url'], {'json': 'status_msg', 'headers': 'knative_headers'}), '(url, json=status_msg, headers=knative_headers)\n', (4393, 4440), False, 'import requests\n'), ((4648, 4659), 'time.time', 'time.time', ([], {}), '()\n', (4657, 4659), False, 'import time\n')] |
# Generated by Django 3.1.4 on 2021-05-29 15:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('AppHome', '0002_auto_20210529_1957'),
]
operations = [
migrations.AddField(
model_name='amazeusersorders',
name='orderId',
field=models.IntegerField(default=0),
),
]
| [
"django.db.models.IntegerField"
] | [((345, 375), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (364, 375), False, 'from django.db import migrations, models\n')] |
"""
.. module:: ft_server.server
"""
import json
import fastText
import os
from flask import Flask
from flask import g
from flask import jsonify
from flask import request
app = Flask(__name__)
if os.environ.get('FT_SERVER_ENV') == "development":
app.config.from_object('config.DevelopmentConfig')
elif os.environ.get('FT_SERVER_ENV') == "test":
app.config.from_object('config.TestConfig')
else:
app.config.from_object('config.Config')
if os.environ.get('FT_SERVER_DOCKERIZED') == "True":
app.config.from_object('config.DockerConfig')
if os.environ.get('FT_SERVER_SETTINGS'):
app.config.from_envvar('FT_SERVER_SETTINGS')
@app.route("/")
def index():
"""Retrieve information about the deployed model."""
model_args = g.ft_model.f.getArgs()
res = {
"lr": model_args.lr,
"lrUpdateRate": model_args.lrUpdateRate,
"epoch": model_args.epoch,
"dim": model_args.dim,
"ws": model_args.ws,
"model": str(model_args.model)[len("model_name."):],
"loss": str(model_args.loss)[len("loss_name."):],
"wordNgrams": model_args.wordNgrams,
"minCountLabel": model_args.minCountLabel,
"label": model_args.label,
"thread": model_args.thread,
"bucket": model_args.bucket,
"cutoff": model_args.cutoff,
"t": model_args.t,
"minn": model_args.minn,
"maxn": model_args.maxn,
"isQuant": g.ft_model.f.isQuant()
}
return jsonify(res)
@app.route("/words")
def words():
"""
Get dictionary used for model training.
Query String:
* freq (bool): Whether or not to return frequencies (default: False)
Returns:
A json array containing words and optionally their frequencies.
"""
    freq = request.args.get('freq') == "True"
words, counts = g.ft_model.get_words(freq)
res = [{"word": w, "count": int(c)} for w, c in zip(words, counts)]
return jsonify(res)
@app.route("/predictions", methods=["GET"], endpoint="get_predictions")
def predictions():
"""
Retrieve predictions for a single word or sentence from the deployed model.
Query String:
* q (str): word or sentence to get a vector representation for.
* k (int): Number of most likely classes returned (default: 1)
* threshold (float): Filter classes with a probability below threshold (default: 0.0)
Returns:
A json containing the vector representations.
"""
k = int(request.args.get('k')) if request.args.get('k') else 1
threshold = float(request.args.get('threshold')) if request.args.get('threshold') else 0.0
query = request.args.get('q')
res = make_prediction(query, k, threshold)
return jsonify(res)
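# Illustrative request (host, port and label names depend on the deployed model):
#   GET /predictions?q=some+text&k=2&threshold=0.1
# returns a JSON array such as
#   [{"label": "__label__a", "proba": 0.83}, {"label": "__label__b", "proba": 0.12}]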
@app.route("/predictions", methods=["POST"], endpoint="post_predictions")
def predictions():
"""
Retrieve predictions for words or sentences from the deployed model.
Query String:
* k (int): Number of most likely classes returned (default: 1)
* threshold (float): Filter classes with a probability below threshold (default: 0.0)
Body:
A json array of strings to get classifications.
Returns:
A json array containing the vector representations.
"""
k = int(request.args.get('k')) if request.args.get('k') else 1
threshold = float(request.args.get('threshold')) if request.args.get('threshold') else 0.0
queries = json.loads(request.data)
res = [make_prediction(el, k, threshold) for el in queries]
return jsonify(res)
@app.route("/representations", methods=["GET"], endpoint="get_representations")
def representations():
"""
Retrieve vector representations for a single word or sentence from the deployed model.
Query String:
q (str): word or sentence to get a vector representation for.
Returns:
A json containing the vector representation and it's dimensionality.
"""
query = request.args.get('q')
res = retrieve_representation(query)
return jsonify(res)
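# Illustrative response shape: GET /representations?q=hello returns
#   {"q": "hello", "representation": [0.01, -0.2, ...], "dim": 300}
# where "dim" is the dimensionality of the loaded fastText model (300 is only an example).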
@app.route("/representations", methods=["POST"], endpoint="post_representations")
def representations():
"""
Retrieve vector representations for a single word or sentence from the deployed model.
Body:
A json array of strings.
Returns:
A json array containing the vector representations.
"""
queries = json.loads(request.data)
res = [retrieve_representation(el) for el in queries]
return jsonify(res)
@app.before_request
def before_request():
g.ft_model = fastText.load_model(app.config["FT_SERVER_MODEL_PATH"])
def retrieve_representation(q):
retrieval_method = g.ft_model.get_word_vector
if len(q.split(" ")) > 1:
retrieval_method = g.ft_model.get_sentence_vector
res = {
"q": q,
"representation": retrieval_method(q).tolist(),
"dim": len(retrieval_method(q))
}
return res
def make_prediction(q, k, threshold):
labels, probas = g.ft_model.predict(q, k, threshold)
return [{"label": l, "proba": p} for l, p in zip(labels, probas)]
if __name__ == '__main__':
app.run(host=app.config["HOST"], port=app.config["PORT"], debug=app.config["DEBUG"])
| [
"flask.request.args.get",
"json.loads",
"flask.g.ft_model.get_words",
"flask.Flask",
"flask.jsonify",
"fastText.load_model",
"flask.g.ft_model.predict",
"os.environ.get",
"flask.g.ft_model.f.isQuant",
"flask.g.ft_model.f.getArgs"
] | [((181, 196), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (186, 196), False, 'from flask import Flask\n'), ((560, 596), 'os.environ.get', 'os.environ.get', (['"""FT_SERVER_SETTINGS"""'], {}), "('FT_SERVER_SETTINGS')\n", (574, 596), False, 'import os\n'), ((201, 232), 'os.environ.get', 'os.environ.get', (['"""FT_SERVER_ENV"""'], {}), "('FT_SERVER_ENV')\n", (215, 232), False, 'import os\n'), ((456, 494), 'os.environ.get', 'os.environ.get', (['"""FT_SERVER_DOCKERIZED"""'], {}), "('FT_SERVER_DOCKERIZED')\n", (470, 494), False, 'import os\n'), ((752, 774), 'flask.g.ft_model.f.getArgs', 'g.ft_model.f.getArgs', ([], {}), '()\n', (772, 774), False, 'from flask import g\n'), ((1475, 1487), 'flask.jsonify', 'jsonify', (['res'], {}), '(res)\n', (1482, 1487), False, 'from flask import jsonify\n'), ((1888, 1914), 'flask.g.ft_model.get_words', 'g.ft_model.get_words', (['freq'], {}), '(freq)\n', (1908, 1914), False, 'from flask import g\n'), ((1999, 2011), 'flask.jsonify', 'jsonify', (['res'], {}), '(res)\n', (2006, 2011), False, 'from flask import jsonify\n'), ((2721, 2742), 'flask.request.args.get', 'request.args.get', (['"""q"""'], {}), "('q')\n", (2737, 2742), False, 'from flask import request\n'), ((2802, 2814), 'flask.jsonify', 'jsonify', (['res'], {}), '(res)\n', (2809, 2814), False, 'from flask import jsonify\n'), ((3525, 3549), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (3535, 3549), False, 'import json\n'), ((3626, 3638), 'flask.jsonify', 'jsonify', (['res'], {}), '(res)\n', (3633, 3638), False, 'from flask import jsonify\n'), ((4043, 4064), 'flask.request.args.get', 'request.args.get', (['"""q"""'], {}), "('q')\n", (4059, 4064), False, 'from flask import request\n'), ((4118, 4130), 'flask.jsonify', 'jsonify', (['res'], {}), '(res)\n', (4125, 4130), False, 'from flask import jsonify\n'), ((4494, 4518), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (4504, 4518), False, 'import json\n'), ((4589, 4601), 'flask.jsonify', 'jsonify', (['res'], {}), '(res)\n', (4596, 4601), False, 'from flask import jsonify\n'), ((4663, 4718), 'fastText.load_model', 'fastText.load_model', (["app.config['FT_SERVER_MODEL_PATH']"], {}), "(app.config['FT_SERVER_MODEL_PATH'])\n", (4682, 4718), False, 'import fastText\n'), ((5100, 5135), 'flask.g.ft_model.predict', 'g.ft_model.predict', (['q', 'k', 'threshold'], {}), '(q, k, threshold)\n', (5118, 5135), False, 'from flask import g\n'), ((311, 342), 'os.environ.get', 'os.environ.get', (['"""FT_SERVER_ENV"""'], {}), "('FT_SERVER_ENV')\n", (325, 342), False, 'import os\n'), ((1434, 1456), 'flask.g.ft_model.f.isQuant', 'g.ft_model.f.isQuant', ([], {}), '()\n', (1454, 1456), False, 'from flask import g\n'), ((2584, 2605), 'flask.request.args.get', 'request.args.get', (['"""k"""'], {}), "('k')\n", (2600, 2605), False, 'from flask import request\n'), ((2669, 2698), 'flask.request.args.get', 'request.args.get', (['"""threshold"""'], {}), "('threshold')\n", (2685, 2698), False, 'from flask import request\n'), ((3386, 3407), 'flask.request.args.get', 'request.args.get', (['"""k"""'], {}), "('k')\n", (3402, 3407), False, 'from flask import request\n'), ((3471, 3500), 'flask.request.args.get', 'request.args.get', (['"""threshold"""'], {}), "('threshold')\n", (3487, 3500), False, 'from flask import request\n'), ((1821, 1845), 'flask.request.args.get', 'request.args.get', (['"""freq"""'], {}), "('freq')\n", (1837, 1845), False, 'from flask import request\n'), ((1792, 1816), 'flask.request.args.get', 
'request.args.get', (['"""freq"""'], {}), "('freq')\n", (1808, 1816), False, 'from flask import request\n'), ((2558, 2579), 'flask.request.args.get', 'request.args.get', (['"""k"""'], {}), "('k')\n", (2574, 2579), False, 'from flask import request\n'), ((2635, 2664), 'flask.request.args.get', 'request.args.get', (['"""threshold"""'], {}), "('threshold')\n", (2651, 2664), False, 'from flask import request\n'), ((3360, 3381), 'flask.request.args.get', 'request.args.get', (['"""k"""'], {}), "('k')\n", (3376, 3381), False, 'from flask import request\n'), ((3437, 3466), 'flask.request.args.get', 'request.args.get', (['"""threshold"""'], {}), "('threshold')\n", (3453, 3466), False, 'from flask import request\n')] |
#!/usr/bin/env python
import os
import re
import io
import subprocess
import sys
import time
import smtplib
import glob
import shutil
import traceback
import zipfile
import platform
import stat
PLATFORMS = [ "x64-osx", "x86-ubuntu", "x64-ubuntu", "x64-debian", "x86-win", "x64-win" ]
REQUIRE_ALL_PLATFORMS = True
FSTAR_BIN_URL = "https://github.com/FStarLang/binaries.git"
FSTAR_BIN_LOCAL = os.path.join("nightly", "fstar-binaries")
FSTAR_BIN_SUBDIR = "z3-tested"
FSTAR_BIN_RBRANCH = "origin/master"
Z3_BIN_URL = "https://github.com/Z3Prover/bin.git"
Z3_BIN_LOCAL = os.path.join("nightly", "z3-binaries")
Z3_BIN_SUBDIR = "nightly"
Z3_BIN_RBRANCH = "origin/master"
Z3_PKG_NAME_PAT = re.compile("^z3-([0-9].[0-9].[0-9]).([a-z0-9]{12})-(x86|x64)-([a-zA-Z]*)-?([\.0-9]*).zip$")
Z3_DIR = os.path.join("nightly", "z3")
class Z3NightlyException(Exception):
pass
def get_platform():
z3bn = "z3"
s = platform.system()
a, fmt = platform.architecture()
z3a = "x64" if a == "64bit" else "x86"
if s == "Windows" or s.startswith("CYGWIN") or s.startswith("MSYS"):
z3bn = "z3.exe"
z3s = "win"
elif s == "Linux":
d, v, nn = platform.linux_distribution()
if d == "Ubuntu":
z3s = "ubuntu"
elif d == "Debian":
z3s = "debian"
else:
print("Warning: unknown linux distribution '%s', assuming Ubuntu." % d)
z3s = "ubuntu"
elif s == "Darwin":
z3s = "osx"
else:
print("Warning: unknown system '%s', assuming Ubuntu." % s)
return "ubuntu", "x64"
return z3bn, z3s, z3a
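# Example return values (illustrative): ("z3.exe", "win", "x64") on 64-bit Windows,
# ("z3", "ubuntu", "x86") on a 32-bit Ubuntu system.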
def mk_args(cmd):
css = cmd.split(" ")
in_string = False
cs = []
cur = ""
for i in css:
if not in_string and i.startswith("\"") and i.endswith("\""):
cs = cs + [i[1:-1]]
elif not in_string and i.startswith("\""):
in_string = True
cur = i[1:]
elif in_string and i.endswith("\""):
in_string = False
cur = cur + " " + i
cs = cs + [cur[:-1]]
elif in_string:
cur = cur + " " + i
elif i == "":
pass
else:
cs = cs + [i]
return cs
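# Example (illustrative): mk_args('git commit -m "some message"')
# returns ['git', 'commit', '-m', 'some message'] - double-quoted arguments
# containing spaces are kept together as a single element.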
def call_logged(cmd, log, checked=True):
cs = mk_args(cmd)
# log.write(">>>>> " + " ".join(cs) + " <<<<<\n")
ec = subprocess.call(cs, stdin=None, stdout=log, stderr=log)
log.flush()
if (checked and ec != 0):
log.write("Error code: %d\n" % ec)
raise Z3NightlyException("Command failed.")
def call_with_output(cmd):
cs = mk_args(cmd)
# log.write(">>>>> " + " ".join(cs) + " <<<<<\n")
p = subprocess.Popen(cs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if err is None or err == "":
return out
else:
return out + " " + err
def update_git(url, branch, dir, subdir, log, quiet=False):
q = "--quiet" if quiet else ""
v = "--verbose" if not quiet else ""
if not os.path.isdir(dir):
call_logged("git clone %s %s %s" % (q, url, dir), log)
else:
prev_wd = os.getcwd()
os.chdir(dir)
call_logged("git reset %s --hard %s" % (q, branch), log)
call_logged("git clean %s -f" % (q), log)
call_logged("git pull %s -s recursive -Xtheirs" % (q), log)
call_logged("git reset %s --hard %s" % (q, branch), log)
os.chdir(prev_wd)
sp = os.path.join(dir, subdir)
if not os.path.isdir(sp):
os.mkdir(sp)
def find_latest_binary(dir, pattern, log):
best_offer = None
for f in os.listdir(dir):
m = pattern.match(f)
if m is not None:
fp = os.path.join(dir, f)
mt = call_with_output("git log -n 1 --date-order --date=raw --pretty=format:\"%cd\"").strip("\"").split(" ")[0]
if best_offer == None or mt > best_offer[1]:
version, git_hash, bitness, platform, pversion = m.groups()
best_offer = (fp, mt, version, git_hash, bitness, platform, pversion)
return best_offer
def find_specific_binary(dir, pattern, version, git_hash, log):
for f in os.listdir(dir):
m = pattern.match(f)
if m is not None:
fp = os.path.join(dir, f)
fversion, fgit_hash, bitness, platform, pversion = m.groups()
if (version == None or fversion == version) and fgit_hash == git_hash:
return (fp, None, version, git_hash, bitness, platform, pversion)
return None
def get_platform_files(from_path, version, git_hash, platforms):
res = []
for pf in platforms:
fnpat = "z3-" + version + "." + git_hash + "-" + pf + "*.zip"
pp = os.path.join(from_path, fnpat)
matching_files = glob.glob(pp)
if REQUIRE_ALL_PLATFORMS == True and len(matching_files) == 0:
raise Z3NightlyException("No platform files for '%s' with version=%s and git_hash=%s." % (pf, version, git_hash))
elif len(matching_files) > 0:
res.append(matching_files[0])
return res
def pick_better(old, new, from_path, pattern, platforms):
    if new is not None and (old is None or (new[3] != old[3] and new[1] > old[1])):
return get_platform_files(from_path, new[2], new[3], platforms)
return None
def wipe_old_pkgs(to_repo, to_subdir, pattern, log):
prev_dir = os.getcwd()
os.chdir(to_repo)
for f in os.listdir(to_subdir):
if pattern.match(f) is not None:
call_logged('git rm "%s/%s"' % (to_subdir, f), log)
os.chdir(prev_dir)
def add_new_pkgs(files, to_repo, to_subdir, pattern, log):
prev_dir = os.getcwd()
os.chdir(to_repo)
for f in files:
f_to_path = os.path.join(to_subdir, os.path.basename(f))
shutil.copy2(os.path.join(prev_dir, f), f_to_path)
call_logged('git add -v %s' % f_to_path, log)
call_logged('git commit -v --amend -m "Automatic update of Z3 nightly packages."', log)
call_logged('git gc --aggressive --auto --prune=all', log)
call_logged('git push -v --force', log)
os.chdir(prev_dir)
def empty(d):
if not os.path.isdir(d):
os.mkdir(d)
for old_file in os.listdir(d):
ofp = os.path.join(d, old_file)
if os.path.isdir(ofp):
shutil.rmtree(ofp)
else:
os.remove(ofp)
def push(version, git_hash, log):
wd = os.getcwd()
try:
old_bin_path = os.path.join(FSTAR_BIN_LOCAL, FSTAR_BIN_SUBDIR)
new_bin_path = os.path.join(Z3_BIN_LOCAL, Z3_BIN_SUBDIR)
update_git(FSTAR_BIN_URL, FSTAR_BIN_RBRANCH, FSTAR_BIN_LOCAL, FSTAR_BIN_SUBDIR, log)
if git_hash == None:
update_git(Z3_BIN_URL, Z3_BIN_RBRANCH, Z3_BIN_LOCAL, Z3_BIN_SUBDIR, log)
best_old = find_latest_binary(old_bin_path, Z3_PKG_NAME_PAT, log)
best_new = find_latest_binary(new_bin_path, Z3_PKG_NAME_PAT, log)
better = pick_better(best_old, best_new, new_bin_path, Z3_PKG_NAME_PAT, PLATFORMS)
if better is not None:
wipe_old_pkgs(FSTAR_BIN_LOCAL, FSTAR_BIN_SUBDIR, Z3_PKG_NAME_PAT, log)
add_new_pkgs(better, FSTAR_BIN_LOCAL, FSTAR_BIN_SUBDIR, Z3_PKG_NAME_PAT, log)
else:
sb = find_specific_binary(new_bin_path, Z3_PKG_NAME_PAT, version, git_hash, log)
if sb == None:
raise Z3NightlyException("Z3 packages with git hash '%s' not found." % git_hash)
else:
pfiles = get_platform_files(new_bin_path, version, git_hash, PLATFORMS)
wipe_old_pkgs(FSTAR_BIN_LOCAL, FSTAR_BIN_SUBDIR, Z3_PKG_NAME_PAT, log)
add_new_pkgs(pfiles, FSTAR_BIN_LOCAL, FSTAR_BIN_SUBDIR, Z3_PKG_NAME_PAT, log)
pass
os.chdir(wd)
return 0
except Exception as ex:
os.chdir(wd)
traceback.print_exc(log)
log.close()
return 1
def get(binary_name, platform, bitness, log, Tested=True):
wd = os.getcwd()
try:
if Tested:
bsdir = os.path.join(FSTAR_BIN_LOCAL, FSTAR_BIN_SUBDIR)
update_git(FSTAR_BIN_URL, FSTAR_BIN_RBRANCH, FSTAR_BIN_LOCAL, FSTAR_BIN_SUBDIR, log, quiet=True)
else:
bsdir = os.path.join(Z3_BIN_LOCAL, Z3_BIN_SUBDIR)
update_git(Z3_BIN_URL, Z3_BIN_RBRANCH, Z3_BIN_LOCAL, Z3_BIN_SUBDIR, log, quiet=True)
empty(Z3_DIR)
for f in os.listdir(bsdir):
m = Z3_PKG_NAME_PAT.match(f)
if m is not None:
fp = os.path.join(bsdir, f)
version, git_hash, fbitness, fplatform, pversion = m.groups()
if fplatform == platform and fbitness == bitness:
zfn = os.path.join(bsdir, f)
# print("Extracting Z3 from %s" % zfn)
with zipfile.ZipFile(zfn, "r") as zf:
zf.extractall(Z3_DIR)
break
Z3_BINARY_PATH = ""
for root, dirs, files in os.walk(Z3_DIR):
for f in files:
fp = os.path.join(root, f)
if f == binary_name:
Z3_BINARY_PATH = fp
if f.endswith("dll"):
os.chmod(fp, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) # Cygwin wants +x on dlls.
if not os.path.isfile(Z3_BINARY_PATH):
raise Z3NightlyException("Z3 not where it should be.")
else:
print("%s" % Z3_BINARY_PATH)
os.chmod(Z3_BINARY_PATH, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
os.chdir(wd)
return 0
except Exception as ex:
os.chdir(wd)
traceback.print_exc(log)
log.close()
return 1
def print_help():
print("Usage: %s (get-tested|get-latest|push)" % sys.argv[0])
if __name__ =='__main__':
if len(sys.argv) < 2:
print_help()
sys.exit(1)
else:
r = 1
log = sys.stdout
op = sys.argv[1]
if op == "get-tested":
bn, pfm, bits = get_platform()
if len(sys.argv) >= 3:
pfm = sys.argv[2]
if len(sys.argv) >= 4:
bits = sys.argv[3]
r = get(bn, pfm, bits, log, Tested=True)
elif op == "get-latest":
bn, pfm, bits = get_platform()
if len(sys.argv) >= 3:
pfm = sys.argv[2]
if len(sys.argv) >= 4:
bits = sys.argv[3]
r = get(bn, pfm, bits, log, Tested=False)
elif op == "push":
version = None
git_hash = None
if len(sys.argv) >= 3:
version = sys.argv[2]
if len(sys.argv) >= 4:
git_hash = sys.argv[3]
r = push(version, git_hash, log)
else:
print("Error: Unknown operation '" + op + "'")
print_help()
r = 1
sys.exit(r)
| [
"zipfile.ZipFile",
"re.compile",
"sys.exit",
"os.walk",
"os.remove",
"os.listdir",
"subprocess.Popen",
"os.chmod",
"platform.system",
"os.path.isdir",
"subprocess.call",
"os.mkdir",
"traceback.print_exc",
"glob.glob",
"os.path.isfile",
"platform.architecture",
"os.path.join",
"platform.linux_distribution",
"os.getcwd",
"os.chdir",
"os.path.basename",
"shutil.rmtree"
] | [((394, 435), 'os.path.join', 'os.path.join', (['"""nightly"""', '"""fstar-binaries"""'], {}), "('nightly', 'fstar-binaries')\n", (406, 435), False, 'import os\n'), ((570, 608), 'os.path.join', 'os.path.join', (['"""nightly"""', '"""z3-binaries"""'], {}), "('nightly', 'z3-binaries')\n", (582, 608), False, 'import os\n'), ((687, 789), 're.compile', 're.compile', (['"""^z3-([0-9].[0-9].[0-9]).([a-z0-9]{12})-(x86|x64)-([a-zA-Z]*)-?([\\\\.0-9]*).zip$"""'], {}), "(\n '^z3-([0-9].[0-9].[0-9]).([a-z0-9]{12})-(x86|x64)-([a-zA-Z]*)-?([\\\\.0-9]*).zip$'\n )\n", (697, 789), False, 'import re\n'), ((789, 818), 'os.path.join', 'os.path.join', (['"""nightly"""', '"""z3"""'], {}), "('nightly', 'z3')\n", (801, 818), False, 'import os\n'), ((911, 928), 'platform.system', 'platform.system', ([], {}), '()\n', (926, 928), False, 'import platform\n'), ((942, 965), 'platform.architecture', 'platform.architecture', ([], {}), '()\n', (963, 965), False, 'import platform\n'), ((2344, 2399), 'subprocess.call', 'subprocess.call', (['cs'], {'stdin': 'None', 'stdout': 'log', 'stderr': 'log'}), '(cs, stdin=None, stdout=log, stderr=log)\n', (2359, 2399), False, 'import subprocess\n'), ((2661, 2729), 'subprocess.Popen', 'subprocess.Popen', (['cs'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(cs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (2677, 2729), False, 'import subprocess\n'), ((3446, 3471), 'os.path.join', 'os.path.join', (['dir', 'subdir'], {}), '(dir, subdir)\n', (3458, 3471), False, 'import os\n'), ((3602, 3617), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (3612, 3617), False, 'import os\n'), ((4155, 4170), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (4165, 4170), False, 'import os\n'), ((5377, 5388), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5386, 5388), False, 'import os\n'), ((5393, 5410), 'os.chdir', 'os.chdir', (['to_repo'], {}), '(to_repo)\n', (5401, 5410), False, 'import os\n'), ((5424, 5445), 'os.listdir', 'os.listdir', (['to_subdir'], {}), '(to_subdir)\n', (5434, 5445), False, 'import os\n'), ((5556, 5574), 'os.chdir', 'os.chdir', (['prev_dir'], {}), '(prev_dir)\n', (5564, 5574), False, 'import os\n'), ((5650, 5661), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5659, 5661), False, 'import os\n'), ((5666, 5683), 'os.chdir', 'os.chdir', (['to_repo'], {}), '(to_repo)\n', (5674, 5683), False, 'import os\n'), ((6085, 6103), 'os.chdir', 'os.chdir', (['prev_dir'], {}), '(prev_dir)\n', (6093, 6103), False, 'import os\n'), ((6189, 6202), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (6199, 6202), False, 'import os\n'), ((6391, 6402), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6400, 6402), False, 'import os\n'), ((7996, 8007), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8005, 8007), False, 'import os\n'), ((3002, 3020), 'os.path.isdir', 'os.path.isdir', (['dir'], {}), '(dir)\n', (3015, 3020), False, 'import os\n'), ((3129, 3140), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3138, 3140), False, 'import os\n'), ((3149, 3162), 'os.chdir', 'os.chdir', (['dir'], {}), '(dir)\n', (3157, 3162), False, 'import os\n'), ((3419, 3436), 'os.chdir', 'os.chdir', (['prev_wd'], {}), '(prev_wd)\n', (3427, 3436), False, 'import os\n'), ((3483, 3500), 'os.path.isdir', 'os.path.isdir', (['sp'], {}), '(sp)\n', (3496, 3500), False, 'import os\n'), ((3510, 3522), 'os.mkdir', 'os.mkdir', (['sp'], {}), '(sp)\n', (3518, 3522), False, 'import os\n'), ((4707, 4737), 'os.path.join', 'os.path.join', (['from_path', 'fnpat'], {}), '(from_path, fnpat)\n', (4719, 4737), False, 'import 
os\n'), ((4763, 4776), 'glob.glob', 'glob.glob', (['pp'], {}), '(pp)\n', (4772, 4776), False, 'import glob\n'), ((6130, 6146), 'os.path.isdir', 'os.path.isdir', (['d'], {}), '(d)\n', (6143, 6146), False, 'import os\n'), ((6156, 6167), 'os.mkdir', 'os.mkdir', (['d'], {}), '(d)\n', (6164, 6167), False, 'import os\n'), ((6218, 6243), 'os.path.join', 'os.path.join', (['d', 'old_file'], {}), '(d, old_file)\n', (6230, 6243), False, 'import os\n'), ((6255, 6273), 'os.path.isdir', 'os.path.isdir', (['ofp'], {}), '(ofp)\n', (6268, 6273), False, 'import os\n'), ((6435, 6482), 'os.path.join', 'os.path.join', (['FSTAR_BIN_LOCAL', 'FSTAR_BIN_SUBDIR'], {}), '(FSTAR_BIN_LOCAL, FSTAR_BIN_SUBDIR)\n', (6447, 6482), False, 'import os\n'), ((6506, 6547), 'os.path.join', 'os.path.join', (['Z3_BIN_LOCAL', 'Z3_BIN_SUBDIR'], {}), '(Z3_BIN_LOCAL, Z3_BIN_SUBDIR)\n', (6518, 6547), False, 'import os\n'), ((7778, 7790), 'os.chdir', 'os.chdir', (['wd'], {}), '(wd)\n', (7786, 7790), False, 'import os\n'), ((8426, 8443), 'os.listdir', 'os.listdir', (['bsdir'], {}), '(bsdir)\n', (8436, 8443), False, 'import os\n'), ((9004, 9019), 'os.walk', 'os.walk', (['Z3_DIR'], {}), '(Z3_DIR)\n', (9011, 9019), False, 'import os\n'), ((9498, 9566), 'os.chmod', 'os.chmod', (['Z3_BINARY_PATH', '(stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)'], {}), '(Z3_BINARY_PATH, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)\n', (9506, 9566), False, 'import os\n'), ((9576, 9588), 'os.chdir', 'os.chdir', (['wd'], {}), '(wd)\n', (9584, 9588), False, 'import os\n'), ((9897, 9908), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9905, 9908), False, 'import sys\n'), ((10916, 10927), 'sys.exit', 'sys.exit', (['r'], {}), '(r)\n', (10924, 10927), False, 'import sys\n'), ((1170, 1199), 'platform.linux_distribution', 'platform.linux_distribution', ([], {}), '()\n', (1197, 1199), False, 'import platform\n'), ((3691, 3711), 'os.path.join', 'os.path.join', (['dir', 'f'], {}), '(dir, f)\n', (3703, 3711), False, 'import os\n'), ((4244, 4264), 'os.path.join', 'os.path.join', (['dir', 'f'], {}), '(dir, f)\n', (4256, 4264), False, 'import os\n'), ((5748, 5767), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (5764, 5767), False, 'import os\n'), ((5790, 5815), 'os.path.join', 'os.path.join', (['prev_dir', 'f'], {}), '(prev_dir, f)\n', (5802, 5815), False, 'import os\n'), ((6287, 6305), 'shutil.rmtree', 'shutil.rmtree', (['ofp'], {}), '(ofp)\n', (6300, 6305), False, 'import shutil\n'), ((6332, 6346), 'os.remove', 'os.remove', (['ofp'], {}), '(ofp)\n', (6341, 6346), False, 'import os\n'), ((7844, 7856), 'os.chdir', 'os.chdir', (['wd'], {}), '(wd)\n', (7852, 7856), False, 'import os\n'), ((7865, 7889), 'traceback.print_exc', 'traceback.print_exc', (['log'], {}), '(log)\n', (7884, 7889), False, 'import traceback\n'), ((8056, 8103), 'os.path.join', 'os.path.join', (['FSTAR_BIN_LOCAL', 'FSTAR_BIN_SUBDIR'], {}), '(FSTAR_BIN_LOCAL, FSTAR_BIN_SUBDIR)\n', (8068, 8103), False, 'import os\n'), ((8247, 8288), 'os.path.join', 'os.path.join', (['Z3_BIN_LOCAL', 'Z3_BIN_SUBDIR'], {}), '(Z3_BIN_LOCAL, Z3_BIN_SUBDIR)\n', (8259, 8288), False, 'import os\n'), ((9335, 9365), 'os.path.isfile', 'os.path.isfile', (['Z3_BINARY_PATH'], {}), '(Z3_BINARY_PATH)\n', (9349, 9365), False, 'import os\n'), ((9642, 9654), 'os.chdir', 'os.chdir', (['wd'], {}), '(wd)\n', (9650, 9654), False, 'import os\n'), ((9663, 9687), 'traceback.print_exc', 'traceback.print_exc', (['log'], {}), '(log)\n', (9682, 9687), False, 'import traceback\n'), ((8537, 8559), 'os.path.join', 'os.path.join', (['bsdir', 
'f'], {}), '(bsdir, f)\n', (8549, 8559), False, 'import os\n'), ((9070, 9091), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (9082, 9091), False, 'import os\n'), ((8730, 8752), 'os.path.join', 'os.path.join', (['bsdir', 'f'], {}), '(bsdir, f)\n', (8742, 8752), False, 'import os\n'), ((9227, 9283), 'os.chmod', 'os.chmod', (['fp', '(stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)'], {}), '(fp, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)\n', (9235, 9283), False, 'import os\n'), ((8837, 8862), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zfn', '"""r"""'], {}), "(zfn, 'r')\n", (8852, 8862), False, 'import zipfile\n')] |
import sacrebleu
class BLEUScore:
def __init__(self, trg_lang='en', lowercase=True):
self.scorer = sacrebleu.BLEU(trg_lang=trg_lang, effective_order=True, lowercase=lowercase)
def score(self, hyp: str, ref: str, src=None, normalize=True, *args, **kwargs):
this_score = self.scorer.sentence_score(hyp, [ref])
return {'bleu_score': {'1gram_precision': this_score.precisions[0],
'2gram_precision': this_score.precisions[1],
'3gram_precision': this_score.precisions[2],
'4gram_precision': this_score.precisions[3],
'brevity_penalty': this_score.bp,
'score': this_score.score }}
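# Illustrative usage: BLEUScore().score("the cat sat on the mat", "the cat is on the mat")
# returns the four n-gram precisions, the brevity penalty and the sentence-level
# BLEU score under the 'bleu_score' key.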
class CHRFScore:
def __init__(self, lowercase=True):
self.scorer = sacrebleu.CHRF(lowercase=lowercase)
def score(self, hyp: str, ref: str, src=None, *args, **kwargs):
this_score = self.scorer.sentence_score(hyp, [ref])
return {'chrf_score': {this_score.name: this_score.score,
'score': this_score.score}}
class TERScore:
def __init__(self, lowercase=True):
self.scorer = sacrebleu.TER()
self.lowercase = lowercase
def score(self, hyp: str, ref: str, src=None, *args, **kwargs):
if self.lowercase:
hyp, ref = hyp.lower(), ref.lower()
this_score = self.scorer.sentence_score(hyp, [ref])
return {'ter_score': {'score': this_score.score}}
| [
"sacrebleu.BLEU",
"sacrebleu.CHRF",
"sacrebleu.TER"
] | [((112, 188), 'sacrebleu.BLEU', 'sacrebleu.BLEU', ([], {'trg_lang': 'trg_lang', 'effective_order': '(True)', 'lowercase': 'lowercase'}), '(trg_lang=trg_lang, effective_order=True, lowercase=lowercase)\n', (126, 188), False, 'import sacrebleu\n'), ((773, 808), 'sacrebleu.CHRF', 'sacrebleu.CHRF', ([], {'lowercase': 'lowercase'}), '(lowercase=lowercase)\n', (787, 808), False, 'import sacrebleu\n'), ((1159, 1174), 'sacrebleu.TER', 'sacrebleu.TER', ([], {}), '()\n', (1172, 1174), False, 'import sacrebleu\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 7 07:57:40 2020
@author: usama
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
if __name__ =='__main__':
plt.close('all')
savePlot=False and True
loadFile='./Dataset/MgO.xlsx'
savePath='./MgO_Result/DataViz/'
df = pd.read_excel (loadFile)
data=df.loc[:, df.columns != 'QE']
label=df['QE']
plt.close('all')
length=11
height=0.625*length
d=np.matrix(data)
p1=np.matmul(d.T,d)
p1=p1.I
p2=np.matmul(d,p1)
H=np.matmul(p2,d.T)
y=np.matrix(label)
ypred=np.matmul(H,y.T)
err=ypred.T - y
err=np.array(err)[0]
Hdiag=H.diagonal()
Hdiag=np.array(Hdiag)[0]
p=sum(Hdiag)
n=len(Hdiag)
H_star= 3 * (p+1)/n
# H_star=0.3
l=np.where(Hdiag>H_star)[0]
l=np.append(l,np.where(abs(err)>3)[0])
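    # The matrix algebra above builds the hat matrix H = X (X^T X)^-1 X^T, whose
    # diagonal entries are the leverages and whose product with y gives the fitted
    # responses. H_star = 3 (p + 1) / n is the usual leverage warning limit of a
    # Williams plot, and points with |residual| > 3 are flagged as outliers
    # (which assumes the residuals are roughly standardized).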
plt.figure(figsize=(length, height))
if list(l) :
outX=Hdiag[l]; outY=err[l]
err=np.delete(err,l)
Hdiag=np.delete(Hdiag,l)
plt.scatter(outX,outY,label='Outliers',marker='x',c='r')
plt.scatter(Hdiag,err,label='Data points')
plt.hlines(3,xmin=0,xmax=1.2*H_star,colors='r',linestyles='--',label='Outlier Upper Limit')
plt.hlines(-3,xmin=0,xmax=1.2*H_star,colors='r',linestyles='-.',label='Outlier Lower Limit')
plt.vlines(H_star,ymin=-3.2,ymax=3.2,colors='g',linestyles='-.',label='Leverage Limit ({0:.3f})'.format(H_star))
plt.ylim([-5,5])
plt.xlim([0,1.25*H_star])
plt.grid(True)
    plt.xlabel('Leverage',fontsize=16)
plt.ylabel('Residuals',fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.legend()
if savePlot:
plt.savefig(savePath+'HatLev.png',quality=95)
plt.savefig(savePath+'HatLev.jpg',quality=95)
plt.savefig(savePath+'HatLev.eps',quality=95)
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"numpy.array",
"pandas.read_excel",
"numpy.where",
"numpy.delete",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"numpy.matmul",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.hlines",
"matplotlib.pyplot.figure",
"numpy.matrix"
] | [((210, 226), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (219, 226), True, 'import matplotlib.pyplot as plt\n'), ((335, 358), 'pandas.read_excel', 'pd.read_excel', (['loadFile'], {}), '(loadFile)\n', (348, 358), True, 'import pandas as pd\n'), ((422, 438), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (431, 438), True, 'import matplotlib.pyplot as plt\n'), ((484, 499), 'numpy.matrix', 'np.matrix', (['data'], {}), '(data)\n', (493, 499), True, 'import numpy as np\n'), ((507, 524), 'numpy.matmul', 'np.matmul', (['d.T', 'd'], {}), '(d.T, d)\n', (516, 524), True, 'import numpy as np\n'), ((543, 559), 'numpy.matmul', 'np.matmul', (['d', 'p1'], {}), '(d, p1)\n', (552, 559), True, 'import numpy as np\n'), ((565, 583), 'numpy.matmul', 'np.matmul', (['p2', 'd.T'], {}), '(p2, d.T)\n', (574, 583), True, 'import numpy as np\n'), ((594, 610), 'numpy.matrix', 'np.matrix', (['label'], {}), '(label)\n', (603, 610), True, 'import numpy as np\n'), ((626, 643), 'numpy.matmul', 'np.matmul', (['H', 'y.T'], {}), '(H, y.T)\n', (635, 643), True, 'import numpy as np\n'), ((914, 950), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(length, height)'}), '(figsize=(length, height))\n', (924, 950), True, 'import matplotlib.pyplot as plt\n'), ((1156, 1200), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Hdiag', 'err'], {'label': '"""Data points"""'}), "(Hdiag, err, label='Data points')\n", (1167, 1200), True, 'import matplotlib.pyplot as plt\n'), ((1203, 1306), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(3)'], {'xmin': '(0)', 'xmax': '(1.2 * H_star)', 'colors': '"""r"""', 'linestyles': '"""--"""', 'label': '"""Outlier Upper Limit"""'}), "(3, xmin=0, xmax=1.2 * H_star, colors='r', linestyles='--', label\n ='Outlier Upper Limit')\n", (1213, 1306), True, 'import matplotlib.pyplot as plt\n'), ((1304, 1407), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(-3)'], {'xmin': '(0)', 'xmax': '(1.2 * H_star)', 'colors': '"""r"""', 'linestyles': '"""-."""', 'label': '"""Outlier Lower Limit"""'}), "(-3, xmin=0, xmax=1.2 * H_star, colors='r', linestyles='-.',\n label='Outlier Lower Limit')\n", (1314, 1407), True, 'import matplotlib.pyplot as plt\n'), ((1518, 1535), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-5, 5]'], {}), '([-5, 5])\n', (1526, 1535), True, 'import matplotlib.pyplot as plt\n'), ((1539, 1567), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1.25 * H_star]'], {}), '([0, 1.25 * H_star])\n', (1547, 1567), True, 'import matplotlib.pyplot as plt\n'), ((1569, 1583), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1577, 1583), True, 'import matplotlib.pyplot as plt\n'), ((1588, 1633), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted Response"""'], {'fontsize': '(16)'}), "('Predicted Response', fontsize=16)\n", (1598, 1633), True, 'import matplotlib.pyplot as plt\n'), ((1637, 1673), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Residuals"""'], {'fontsize': '(16)'}), "('Residuals', fontsize=16)\n", (1647, 1673), True, 'import matplotlib.pyplot as plt\n'), ((1677, 1700), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (1687, 1700), True, 'import matplotlib.pyplot as plt\n'), ((1705, 1728), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (1715, 1728), True, 'import matplotlib.pyplot as plt\n'), ((1733, 1745), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1743, 1745), True, 'import matplotlib.pyplot as plt\n'), ((676, 689), 
'numpy.array', 'np.array', (['err'], {}), '(err)\n', (684, 689), True, 'import numpy as np\n'), ((731, 746), 'numpy.array', 'np.array', (['Hdiag'], {}), '(Hdiag)\n', (739, 746), True, 'import numpy as np\n'), ((840, 864), 'numpy.where', 'np.where', (['(Hdiag > H_star)'], {}), '(Hdiag > H_star)\n', (848, 864), True, 'import numpy as np\n'), ((1020, 1037), 'numpy.delete', 'np.delete', (['err', 'l'], {}), '(err, l)\n', (1029, 1037), True, 'import numpy as np\n'), ((1051, 1070), 'numpy.delete', 'np.delete', (['Hdiag', 'l'], {}), '(Hdiag, l)\n', (1060, 1070), True, 'import numpy as np\n'), ((1087, 1147), 'matplotlib.pyplot.scatter', 'plt.scatter', (['outX', 'outY'], {'label': '"""Outliers"""', 'marker': '"""x"""', 'c': '"""r"""'}), "(outX, outY, label='Outliers', marker='x', c='r')\n", (1098, 1147), True, 'import matplotlib.pyplot as plt\n'), ((1771, 1819), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savePath + 'HatLev.png')"], {'quality': '(95)'}), "(savePath + 'HatLev.png', quality=95)\n", (1782, 1819), True, 'import matplotlib.pyplot as plt\n'), ((1825, 1873), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savePath + 'HatLev.jpg')"], {'quality': '(95)'}), "(savePath + 'HatLev.jpg', quality=95)\n", (1836, 1873), True, 'import matplotlib.pyplot as plt\n'), ((1879, 1927), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savePath + 'HatLev.eps')"], {'quality': '(95)'}), "(savePath + 'HatLev.eps', quality=95)\n", (1890, 1927), True, 'import matplotlib.pyplot as plt\n')] |
import re
from ..tokenization import FullTokenizer
from ..utils import (
get_or_make_label_encoder, BOS_TOKEN, EOS_TOKEN)
from ..create_generators import create_pretraining_generator, create_single_problem_generator
from .ner_data import (
    gold_horse_ent_type_process_fn, gold_horse_segment_process_fn, read_ner_data)
def weibo_fake_cls(params, mode):
"""Just a test problem to test multiproblem support
Arguments:
params {Params} -- params
mode {mode} -- mode
"""
tokenizer = FullTokenizer(vocab_file=params.vocab_file)
data = read_ner_data(file_pattern='data/ner/weiboNER*',
proc_fn=gold_horse_ent_type_process_fn)
if mode == 'train':
data = data['train']
else:
data = data['eval']
inputs_list = data['inputs'][:100]
target_list = data['target'][:100]
new_target_list = ['1' if len(set(t)) > 1 else '0' for t in target_list]
label_encoder = get_or_make_label_encoder(
params, 'weibo_fake_cls', mode, new_target_list, '0')
return create_single_problem_generator('weibo_fake_cls',
inputs_list,
new_target_list,
label_encoder,
params,
tokenizer,
mode)
def weibo_fake_seq2seq_tag(params, mode: str):
tokenizer = FullTokenizer(vocab_file=params.vocab_file)
data = read_ner_data(file_pattern='data/ner/weiboNER*',
proc_fn=gold_horse_ent_type_process_fn)
if mode == 'train':
data = data['train']
else:
data = data['eval']
inputs_list = data['inputs'][:100]
target_list = data['target'][:100]
new_target_list = [['1', '2'] for t in target_list]
label_encoder = get_or_make_label_encoder(
params,
'weibo_fake_seq2seq_tag',
mode,
[BOS_TOKEN, '1', '2', EOS_TOKEN],
zero_class=BOS_TOKEN)
return create_single_problem_generator(
'weibo_fake_seq2seq_tag',
inputs_list,
new_target_list,
label_encoder,
params,
tokenizer,
mode)
def weibo_pretrain(params, mode):
sentence_split = r'[.!?。?!]'
tokenizer = FullTokenizer(vocab_file=params.vocab_file)
data = read_ner_data(file_pattern='data/ner/weiboNER*',
proc_fn=gold_horse_segment_process_fn)
if mode == 'train':
data = data['train']
else:
data = data['eval']
inputs_list = data['inputs']
segmented_list = []
for document in inputs_list:
segmented_list.append([])
doc_string = ''.join(document)
splited_doc = re.split(sentence_split, doc_string)
for sentence in splited_doc:
if sentence:
segmented_list[-1].append(list(sentence))
segmented_list = [doc for doc in segmented_list if doc]
return create_pretraining_generator('weibo_pretrain',
segmented_list,
None,
None,
params,
tokenizer,
mode)
def weibo_fake_seq_tag(params, mode):
tokenizer = FullTokenizer(vocab_file=params.vocab_file)
data = read_ner_data(file_pattern='data/ner/weiboNER*',
proc_fn=gold_horse_ent_type_process_fn)
if mode == 'train':
data = data['train']
else:
data = data['eval']
inputs_list = data['inputs'][:100]
target_list = data['target'][:100]
flat_label = [item for sublist in target_list for item in sublist]
label_encoder = get_or_make_label_encoder(
params, 'weibo_fake_seq_tag', mode, flat_label)
return create_single_problem_generator('weibo_fake_seq_tag',
inputs_list,
target_list,
label_encoder,
params,
tokenizer,
mode)
| [
"re.split"
] | [((2775, 2811), 're.split', 're.split', (['sentence_split', 'doc_string'], {}), '(sentence_split, doc_string)\n', (2783, 2811), False, 'import re\n')] |
# coding: utf8
from __future__ import unicode_literals, print_function, division
import pytest
from csvw.metadata import Column
from pylexibank.db import Database, ColSpec, schema
def test_ColSpec():
col = ColSpec(name='c', csvw_type='float')
assert col.convert(5) == '5'
def test_schema(cldf_dataset):
cldf_dataset['ParameterTable', 'Name'].name = 'thename'
assert cldf_dataset['ParameterTable', 'http://cldf.clld.org/v1.0/terms.rdf#name'].header == 'thename'
tables, reftables = schema(cldf_dataset)
assert len(tables) == 4
assert len(reftables) == 2
for t in tables:
ptschema = t.sql
if 'ParameterTable' in ptschema:
assert "`Name`" in ptschema
break
else:
assert False
def test_db(tmpdir, dataset, mocker, capsys):
db = Database(str(tmpdir.join('lexibank.sqlite')))
db.load(dataset)
db.create(exists_ok=True)
with pytest.raises(ValueError):
db.create()
db.create(force=True)
db.load(dataset)
db.load_glottolog_data(dataset.glottolog)
db.load_concepticon_data(mocker.Mock(conceptsets={}))
for sql in db.sql:
db.fetchall(sql)
with db.connection() as conn:
db.fetchall('select * from dataset', conn=conn, verbose=True)
out, _ = capsys.readouterr()
assert 'select' in out
db.create(force=True)
db.load(dataset)
cols = dataset.cldf.wl['FormTable'].tableSchema.columns
cols.append(Column(name='custom'))
db.load(dataset)
cols.pop()
cols.append(Column(name='custom', datatype='integer'))
with pytest.raises(ValueError):
db.load(dataset)
cols.pop()
db.load(dataset)
def test_db_multiple_datasets(tmpdir, dataset, dataset_cldf, capsys):
db = Database(str(tmpdir.join('lexibank.sqlite')))
db.load(dataset)
db.load(dataset_cldf, verbose=True)
with db.connection() as conn:
res = db.fetchall('select `id`, `name` from LanguageTable', conn=conn)
assert len(res) == 3
assert ('1', 'Lang CLDF') in [(r[0], r[1]) for r in res]
res = db.fetchall('select `id`, `value` from FormTable', conn=conn)
assert ('1', 'abc') in [(r[0], r[1]) for r in res]
| [
"pylexibank.db.schema",
"csvw.metadata.Column",
"pylexibank.db.ColSpec",
"pytest.raises"
] | [((214, 250), 'pylexibank.db.ColSpec', 'ColSpec', ([], {'name': '"""c"""', 'csvw_type': '"""float"""'}), "(name='c', csvw_type='float')\n", (221, 250), False, 'from pylexibank.db import Database, ColSpec, schema\n'), ((507, 527), 'pylexibank.db.schema', 'schema', (['cldf_dataset'], {}), '(cldf_dataset)\n', (513, 527), False, 'from pylexibank.db import Database, ColSpec, schema\n'), ((926, 951), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (939, 951), False, 'import pytest\n'), ((1460, 1481), 'csvw.metadata.Column', 'Column', ([], {'name': '"""custom"""'}), "(name='custom')\n", (1466, 1481), False, 'from csvw.metadata import Column\n'), ((1535, 1576), 'csvw.metadata.Column', 'Column', ([], {'name': '"""custom"""', 'datatype': '"""integer"""'}), "(name='custom', datatype='integer')\n", (1541, 1576), False, 'from csvw.metadata import Column\n'), ((1587, 1612), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1600, 1612), False, 'import pytest\n')] |
import time
import serial
import os
def startUp():
print("*****************************************************")
print(" PUMP: StartUp ")
print("*****************************************************\n")
print("Please read the instructions before continuing")
print("1. Make sure the ports are correctly input in the config file.")
print("2. Initially, pump should be stopped (NOT TURNED OFF!). Read documentation if you do not understand")
os.system('pause')
print("Starting Process . . . ")
time.sleep(1)
def connectPump(port):
# configure the serial connections (the parameters differs on the device you are connecting to)
ser = serial.Serial(
port=port,
baudrate=9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS
)
return ser
def sendCommand(ser, command, waitForOutput=True, tries=3):
# For debug
# return "OK\r\n"
    # Try sending the command up to `tries` times; return the first non-empty reply.
for _ in range(tries):
# encode string
commandEncoded = command.encode()
# write
ser.write(commandEncoded)
# wait for the output to return
if waitForOutput:
time.sleep(0.1)
output = ''
while ser.inWaiting() > 0:
output += ser.read(1).decode()
if output != '':
return output
return None
def setFlowRate(ser, flow_rate, waitForOutput=True, tries=2):
try_count = 0
while try_count <= tries:
# change . to ,
fowRateEURO = str(round(float(flow_rate), 1)).replace('.', ',')
while len(fowRateEURO) != 5:
fowRateEURO = '0' + fowRateEURO
output = sendCommand(ser, "SMM=" + fowRateEURO + "!", waitForOutput=waitForOutput)
valid = validate_output(output)
if valid:
return valid
try_count += 1
if try_count > tries:
return False
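# Illustrative command framing (derived from the padding logic above): a flow rate
# of 3.5 is sent as "SMM=003,5!" - rounded to one decimal place, decimal point
# replaced by a comma, and zero-padded on the left to five characters.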
def togglePump(ser, waitForOutput=True, tries=2):
try_count = 0
while try_count <= tries:
output = sendCommand(ser, "TA2!", waitForOutput=waitForOutput)
valid = validate_output(output)
if valid:
return valid
try_count += 1
if try_count > tries:
return False
def validate_output(output):
if output is not None:
print("Output from pump:", output.strip())
if output == "OK\r\n":
print("Successfully send command\n")
return True
else:
print("Failed to send command\n")
return False
else:
print("Error: no output from pump. possibly the rs232 got disconnected.")
def getDesiredFlowRate(elapsedTime, flowRates, timeList):
indexTime = 0
if elapsedTime > timeList[-1]:
indexTime = -1
else:
for index, pointInTime in enumerate(timeList):
if elapsedTime < pointInTime:
indexTime = index - 1
break
return flowRates[indexTime]
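# Illustrative example (hypothetical values): with timeList = [0, 60, 120] and
# flowRates = [1.0, 2.5, 0.5], an elapsedTime of 90 falls in the second interval
# and returns 2.5, while anything beyond 120 returns the last rate, 0.5.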
def serialConsole(ser):
#ser.open()
print('Enter your commands below.\r\nInsert "exit" to leave the application.')
userInput = 1
while 1:
# get keyboard input
# Python 2 users
# input = raw_input(">> ")
# Python 3 users
userInput = input(">> ")
# type exit to exit console
if userInput == 'exit':
ser.close()
exit()
else:
# send the character to the device
            # (note that I don't append a \r\n carriage return and line feed to the characters - this is not requested by my device)
#convert to bytes
userInputBytes = userInput.encode()
ser.write(userInputBytes)
out = ''
# let's wait one second before reading output (let's give device time to answer)
time.sleep(1)
while ser.inWaiting() > 0:
out += ser.read(1).decode()
if out != '':
print(">> " + out)
if __name__ == '__main__':
ser = connectPump('COM3')
print(ser.port)
# send one command
print(sendCommand(ser, "DSP?", waitForOutput=True))
# start console
serialConsole(ser)
| [
"os.system",
"serial.Serial",
"time.sleep"
] | [((508, 526), 'os.system', 'os.system', (['"""pause"""'], {}), "('pause')\n", (517, 526), False, 'import os\n'), ((568, 581), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (578, 581), False, 'import time\n'), ((717, 845), 'serial.Serial', 'serial.Serial', ([], {'port': 'port', 'baudrate': '(9600)', 'parity': 'serial.PARITY_NONE', 'stopbits': 'serial.STOPBITS_ONE', 'bytesize': 'serial.EIGHTBITS'}), '(port=port, baudrate=9600, parity=serial.PARITY_NONE, stopbits\n =serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS)\n', (730, 845), False, 'import serial\n'), ((1262, 1277), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1272, 1277), False, 'import time\n'), ((3900, 3913), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3910, 3913), False, 'import time\n')] |
import RPi.GPIO as GPIO
import serial
import http.client
import urllib
import threading
import string
import imaplib
import smtplib
from datetime import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import time
import email
from time import strftime
import requests
from email.parser import Parser
cntTemp = 0
cntOsv = 0
cntRaz = 0
i = 0
ii = 0
prosecnaTemp = 0
prosecnaOsv = 0
prosecnaRaz = 0
data=0
flagStart = 0
flagZelena = 0
flagPrviPodatak = 0
PodaciCnt = 0
proveraEnabled = 1
flagPeriodTemp = 0
flagPeriodOsv = 0
flagPeriodRaz = 0
prosecnaTemp = 0
prosecnaOsv = 0
prosecnaRaz = 0
# times for sending the reports
vremeTemperature = 0
vremeRazdaljina = 0
vremeOsvetljenosti = 0
# mean values of the measurements
temperaturaSrednja = 0
razdaljinaSrednja = 0
osvetljenostSrednja = 0
# counter
brojac = 0
# time measurement
timerTemp = 0
timerRaz = 0
timerOsv = 0
#lsusb
#cd /dev, then ls tty*, plug the Arduino into USB, run ls tty* again and see which port appeared
ser = serial.Serial('/dev/ttyACM0',9600)
LEDCrvena = 3
LEDZelena = 21
mail = imaplib.IMAP4_SSL('imap.gmail.com')
mail.login("<EMAIL>", "password")
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(LEDCrvena, GPIO.OUT)
GPIO.output(LEDCrvena, GPIO.LOW)
GPIO.setup(LEDZelena, GPIO.OUT)
GPIO.output(LEDZelena, GPIO.LOW)
def ZelenaUgasi():
GPIO.output(LEDZelena, GPIO.LOW)
def CrvenaUgasi():
global proveraEnabled
GPIO.output(LEDCrvena, GPIO.LOW)
proveraEnabled = 1
def izvestajTemperatura():
    # function for sending the report by email
fromaddr = "<EMAIL>"
toaddr = "<EMAIL>"
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = "Temperatura"
    # vremeTemperature is taken from the configuration email, temperaturaKonfig is the average temperature
body = "Средња температура у последњих "+str(vremeTemperature) +" минута је " + str(temperaturaSrednja) + " степени С."
msg.attach(MIMEText(body, 'plain'))
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(fromaddr, "password")
text = msg.as_string()
server.sendmail(fromaddr, toaddr, text)
server.quit()
def izvestajOsvetljenost():
    # function for sending the report by email
fromaddr = "<EMAIL>"
toaddr = "<EMAIL>"
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = "Osvetljenost"
body = "Средња осветљеност у последњих "+str(vremeOsvetljenosti) +" минута је "+str(osvetljenostSrednja) + " lux."
msg.attach(MIMEText(body, 'plain'))
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(fromaddr, "password")
text = msg.as_string()
server.sendmail(fromaddr, toaddr, text)
server.quit()
def izvestajRazdaljina():
    # function for sending the report by email
fromaddr = "<EMAIL>"
toaddr = "<EMAIL>"
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = "Razdaljina"
body = "Средње растојање у последњих "+str(vremeRazdaljina) +" минута је "+str(razdaljinaSrednja) + " cm."
msg.attach(MIMEText(body, 'plain'))
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(fromaddr, "password")
text = msg.as_string()
server.sendmail(fromaddr, toaddr, text)
server.quit()
def CheckMail():
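    # Polls the Gmail inbox (rescheduled every 5 seconds via threading.Timer below)
    # for two kinds of unread mail: subject "Posalji" asks for an immediate report
    # on a sensor, while subject "Konfiguracija" switches sensors between
    # on-request and periodic reporting.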
global i, ii, vremeTemperature,vremeRazdaljina, vremeOsvetljenosti
global osvetljenostSrednja, temperaturaSrednja, razdaljinaSrednja
global prosecnaTemp, prosecnaOsv, prosecnaRaz
global cntTemp, cntOsv, cntRaz, flagZelena
global timerTemp, timerRaz, timerOsv
global flagPeriodTemp, flagPeriodOsv, flagPeriodRaz
mail.list()
mail.select('inbox')
result, podaci = mail.uid('search', None, '(SUBJECT "Posalji" UNSEEN)')
result1, podaci1 = mail.uid('search', None, '(SUBJECT "Konfiguracija" UNSEEN)')
i = len(podaci[0].split())
ii = len(podaci1[0].split())
for x in range(i):
latest_email_uid = podaci[0].split()[x]
        # fetch the email
result, email_data = mail.uid('fetch', latest_email_uid, '(RFC822)')
raw_email = email_data[0][1]
raw_email_string = raw_email.decode('utf-8')
email_message = email.message_from_string(raw_email_string)
        # walk through the email parts
for part in email_message.walk():
if part.get_content_type() == "text/plain":
body = part.get_payload(decode=True)
                # email body
bodystr=str(body)
prvi,drugi = bodystr.split("b'")
split = drugi.split("\\r\\n")
count = split.__len__()
for z in range(count-1):
word = split[z]
if word=="Temperatura":
if flagPeriodTemp==1:
print("Температура у режиму периода")
else:
print("Температура послата")
vremeTemperature = int(timerTemp/60)
temperaturaSrednja = prosecnaTemp/cntTemp
temperaturaSrednja = round(temperaturaSrednja,2)
izvestajTemperatura()
timerTemp = 0
temperaturaSrednja = 0
prosecnaTemp = 0
cntTemp = 0
if word=="Razdaljina":
if flagPeriodRaz==1:
print("Раздаљина у режиму периода")
else:
print("Растојање послато")
vremeRazdaljina = int(timerRaz/60)
razdaljinaSrednja = prosecnaRaz/cntRaz
razdaljinaSrednja = round(razdaljinaSrednja,2)
izvestajRazdaljina()
timerRaz = 0
razdaljinaSrednja = 0
prosecnaRaz = 0
cntRaz = 0
if word=="Osvetljenost":
if flagPeriodOsv==1:
print("Осветљеност у режиму периода")
else:
print("Осветљеност послата")
vremeOsvetljenosti = int(timerOsv/60)
osvetljenostSrednja = prosecnaOsv/cntOsv
osvetljenostSrednja = round(osvetljenostSrednja,2)
izvestajOsvetljenost()
timerOsv = 0
osvetljenostSrednja = 0
prosecnaOsv = 0
cntOsv = 0
for x in range(ii):
latest_email_uid = podaci1[0].split()[x]
        # fetch the email
result, email_data = mail.uid('fetch', latest_email_uid, '(RFC822)')
raw_email = email_data[0][1]
raw_email_string = raw_email.decode('utf-8')
email_message = email.message_from_string(raw_email_string)
        # walk through the email parts
for part in email_message.walk():
if part.get_content_type() == "text/plain":
body = part.get_payload(decode=True)
                # email body
bodystr=str(body)
prvi,drugi = bodystr.split("b'")
split = drugi.split("\\r\\n")
count = split.__len__()
konftime = datetime.now().strftime("%H:%M:%S")
ser.write(konftime.encode())
flagZelena = 1
for z in range(count-1):
word = split[z]
a,b = word.split(":")
b,c = b.split(",")
if a =="Temperatura":
if b=="zahtev":
vremeTemperature = 0
flagPeriodTemp = 0
prosecnaTemp = 0
cntTemp = 0
temperaturaSrednja = 0
timerTemp = 0
print("Температура [захтев]")
elif b=="period":
vremeTemperature = int(c)
flagPeriodTemp = 1
timerTemp = 0
temperaturaSrednja = 0
prosecnaTemp = 0
cntTemp = 0
print("Температура [период]")
if a =="Osvetljenost":
if b=="zahtev":
vremeOsvetljenosti = 0
flagPeriodOsv = 0
prosecnaOsv = 0
cntOsv = 0
osvetljenostSrednja = 0
timerOsv = 0
print("Осветљеност [захтев]")
elif b=="period":
flagPeriodOsv = 1
vremeOsvetljenosti = int(c)
timerOsv = 0
osvetljenostSrednja = 0
prosecnaOsv = 0
cntOsv = 0
print("Оветљеност [период]")
if a =="Razdaljina":
if b=="zahtev":
vremeRazdaljina = 0
flagPeriodRaz = 0
timerRaz = 0
razdaljinaSrednja = 0
prosecnaRaz = 0
cntRaz = 0
print("Раздаљина [захтев]")
elif b=="period":
vremeRazdaljina = int(c)
flagPeriodRaz = 1
timerRaz = 0
razdaljinaSrednja = 0
prosecnaRaz = 0
cntRaz = 0
print("Раздаљина [период]")
threading.Timer(5,CheckMail).start()
CheckMail()
while True:
data = ser.readline()
Podaci = str(data)
#print(data)
b,a = Podaci.split("b'")
NoviPodaci,c = a.split("\\r\\n")
if flagStart==1:
PodaciCnt = PodaciCnt + 1
if PodaciCnt==1:
prosecnaTemp = prosecnaTemp + float(NoviPodaci)
#print((NoviPodaci))
cntTemp+=1
if PodaciCnt==2:
prosecnaOsv = prosecnaOsv + int(NoviPodaci)
cntOsv+= 1
if PodaciCnt==3:
if proveraEnabled ==1:
if (int(NoviPodaci) < 5):
GPIO.output(LEDCrvena, GPIO.HIGH)
proveraEnabled = 0
requests.post("https://maker.ifttt.com/trigger/RAZDALJINA/with/key/<KEY>")
threading.Timer(15,CrvenaUgasi).start()
prosecnaRaz = prosecnaRaz + int(NoviPodaci)
print(int(NoviPodaci))
cntRaz+= 1
PodaciCnt=0
flagStart=0
flagPrviPodatak = 1
brojac += 0.5
timerTemp += 0.5
timerRaz += 0.5
timerOsv += 0.5
#print(brojac)
if NoviPodaci =="Novi podaci: ":
flagStart = 1
if flagZelena:
GPIO.output(LEDZelena, GPIO.HIGH)
threading.Timer(10,ZelenaUgasi).start()
flagZelena = 0
if flagPeriodTemp==1:
if (float(timerTemp) == int(vremeTemperature*60)):
temperaturaSrednja = prosecnaTemp/cntTemp
temperaturaSrednja = round(temperaturaSrednja,2)
izvestajTemperatura()
print("Слање температуре на",vremeTemperature,"минут/а")
timerTemp = 0
temperaturaSrednja = 0
prosecnaTemp = 0
cntTemp = 0
if flagPeriodRaz==1:
if (float(timerRaz) == int(vremeRazdaljina*60)):
razdaljinaSrednja = prosecnaRaz/cntRaz
razdaljinaSrednja = round(razdaljinaSrednja,2)
izvestajRazdaljina()
print("Слање раздаљине на",vremeRazdaljina,"минут/а")
timerRaz = 0
razdaljinaSrednja = 0
prosecnaRaz = 0
cntRaz = 0
if flagPeriodOsv==1:
if (float(timerOsv) == int(vremeOsvetljenosti*60)):
osvetljenostSrednja = prosecnaOsv/cntOsv
osvetljenostSrednja = round(osvetljenostSrednja,2)
izvestajOsvetljenost()
print("Слање осветљености на", vremeOsvetljenosti, "минут/а")
timerOsv = 0
osvetljenostSrednja = 0
prosecnaOsv = 0
cntOsv = 0
| [
"smtplib.SMTP",
"requests.post",
"imaplib.IMAP4_SSL",
"RPi.GPIO.setup",
"RPi.GPIO.output",
"email.message_from_string",
"RPi.GPIO.setwarnings",
"threading.Timer",
"datetime.datetime.now",
"email.mime.multipart.MIMEMultipart",
"serial.Serial",
"RPi.GPIO.setmode",
"email.mime.text.MIMEText"
] | [((1062, 1097), 'serial.Serial', 'serial.Serial', (['"""/dev/ttyACM0"""', '(9600)'], {}), "('/dev/ttyACM0', 9600)\n", (1075, 1097), False, 'import serial\n'), ((1135, 1170), 'imaplib.IMAP4_SSL', 'imaplib.IMAP4_SSL', (['"""imap.gmail.com"""'], {}), "('imap.gmail.com')\n", (1152, 1170), False, 'import imaplib\n'), ((1207, 1229), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (1219, 1229), True, 'import RPi.GPIO as GPIO\n'), ((1231, 1254), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (1247, 1254), True, 'import RPi.GPIO as GPIO\n'), ((1256, 1287), 'RPi.GPIO.setup', 'GPIO.setup', (['LEDCrvena', 'GPIO.OUT'], {}), '(LEDCrvena, GPIO.OUT)\n', (1266, 1287), True, 'import RPi.GPIO as GPIO\n'), ((1289, 1321), 'RPi.GPIO.output', 'GPIO.output', (['LEDCrvena', 'GPIO.LOW'], {}), '(LEDCrvena, GPIO.LOW)\n', (1300, 1321), True, 'import RPi.GPIO as GPIO\n'), ((1324, 1355), 'RPi.GPIO.setup', 'GPIO.setup', (['LEDZelena', 'GPIO.OUT'], {}), '(LEDZelena, GPIO.OUT)\n', (1334, 1355), True, 'import RPi.GPIO as GPIO\n'), ((1357, 1389), 'RPi.GPIO.output', 'GPIO.output', (['LEDZelena', 'GPIO.LOW'], {}), '(LEDZelena, GPIO.LOW)\n', (1368, 1389), True, 'import RPi.GPIO as GPIO\n'), ((1414, 1446), 'RPi.GPIO.output', 'GPIO.output', (['LEDZelena', 'GPIO.LOW'], {}), '(LEDZelena, GPIO.LOW)\n', (1425, 1446), True, 'import RPi.GPIO as GPIO\n'), ((1497, 1529), 'RPi.GPIO.output', 'GPIO.output', (['LEDCrvena', 'GPIO.LOW'], {}), '(LEDCrvena, GPIO.LOW)\n', (1508, 1529), True, 'import RPi.GPIO as GPIO\n'), ((1693, 1708), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', ([], {}), '()\n', (1706, 1708), False, 'from email.mime.multipart import MIMEMultipart\n'), ((2076, 2111), 'smtplib.SMTP', 'smtplib.SMTP', (['"""smtp.gmail.com"""', '(587)'], {}), "('smtp.gmail.com', 587)\n", (2088, 2111), False, 'import smtplib\n'), ((2402, 2417), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', ([], {}), '()\n', (2415, 2417), False, 'from email.mime.multipart import MIMEMultipart\n'), ((2694, 2729), 'smtplib.SMTP', 'smtplib.SMTP', (['"""smtp.gmail.com"""', '(587)'], {}), "('smtp.gmail.com', 587)\n", (2706, 2729), False, 'import smtplib\n'), ((3021, 3036), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', ([], {}), '()\n', (3034, 3036), False, 'from email.mime.multipart import MIMEMultipart\n'), ((3306, 3341), 'smtplib.SMTP', 'smtplib.SMTP', (['"""smtp.gmail.com"""', '(587)'], {}), "('smtp.gmail.com', 587)\n", (3318, 3341), False, 'import smtplib\n'), ((2023, 2046), 'email.mime.text.MIMEText', 'MIMEText', (['body', '"""plain"""'], {}), "(body, 'plain')\n", (2031, 2046), False, 'from email.mime.text import MIMEText\n'), ((2650, 2673), 'email.mime.text.MIMEText', 'MIMEText', (['body', '"""plain"""'], {}), "(body, 'plain')\n", (2658, 2673), False, 'from email.mime.text import MIMEText\n'), ((3258, 3281), 'email.mime.text.MIMEText', 'MIMEText', (['body', '"""plain"""'], {}), "(body, 'plain')\n", (3266, 3281), False, 'from email.mime.text import MIMEText\n'), ((4410, 4453), 'email.message_from_string', 'email.message_from_string', (['raw_email_string'], {}), '(raw_email_string)\n', (4435, 4453), False, 'import email\n'), ((7272, 7315), 'email.message_from_string', 'email.message_from_string', (['raw_email_string'], {}), '(raw_email_string)\n', (7297, 7315), False, 'import email\n'), ((11811, 11844), 'RPi.GPIO.output', 'GPIO.output', (['LEDZelena', 'GPIO.HIGH'], {}), '(LEDZelena, GPIO.HIGH)\n', (11822, 11844), True, 'import RPi.GPIO as GPIO\n'), ((10464, 10493), 'threading.Timer', 
'threading.Timer', (['(5)', 'CheckMail'], {}), '(5, CheckMail)\n', (10479, 10493), False, 'import threading\n'), ((11853, 11885), 'threading.Timer', 'threading.Timer', (['(10)', 'ZelenaUgasi'], {}), '(10, ZelenaUgasi)\n', (11868, 11885), False, 'import threading\n'), ((11103, 11136), 'RPi.GPIO.output', 'GPIO.output', (['LEDCrvena', 'GPIO.HIGH'], {}), '(LEDCrvena, GPIO.HIGH)\n', (11114, 11136), True, 'import RPi.GPIO as GPIO\n'), ((11196, 11270), 'requests.post', 'requests.post', (['"""https://maker.ifttt.com/trigger/RAZDALJINA/with/key/<KEY>"""'], {}), "('https://maker.ifttt.com/trigger/RAZDALJINA/with/key/<KEY>')\n", (11209, 11270), False, 'import requests\n'), ((7723, 7737), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7735, 7737), False, 'from datetime import datetime\n'), ((11292, 11324), 'threading.Timer', 'threading.Timer', (['(15)', 'CrvenaUgasi'], {}), '(15, CrvenaUgasi)\n', (11307, 11324), False, 'import threading\n')] |
import os
APP_SECRET_KEY = os.environ.get('APP_SECRET_KEY', '1234')
IMGUR_ID = os.environ.get('IMGUR_ID', '')
IMGUR_SECRET = os.environ.get('IMGUR_SECRET', '')
TWITTER_KEY = os.environ.get('TWITTER_KEY', '')
TWITTER_SECRET = os.environ.get('TWITTER_SECRET', '')
GOOGLE_ID = os.environ.get('GOOGLE_ID', '')
GOOGLE_SECRET = os.environ.get('GOOGLE_SECRET', '') | [
"os.environ.get"
] | [((28, 68), 'os.environ.get', 'os.environ.get', (['"""APP_SECRET_KEY"""', '"""1234"""'], {}), "('APP_SECRET_KEY', '1234')\n", (42, 68), False, 'import os\n'), ((81, 111), 'os.environ.get', 'os.environ.get', (['"""IMGUR_ID"""', '""""""'], {}), "('IMGUR_ID', '')\n", (95, 111), False, 'import os\n'), ((127, 161), 'os.environ.get', 'os.environ.get', (['"""IMGUR_SECRET"""', '""""""'], {}), "('IMGUR_SECRET', '')\n", (141, 161), False, 'import os\n'), ((177, 210), 'os.environ.get', 'os.environ.get', (['"""TWITTER_KEY"""', '""""""'], {}), "('TWITTER_KEY', '')\n", (191, 210), False, 'import os\n'), ((228, 264), 'os.environ.get', 'os.environ.get', (['"""TWITTER_SECRET"""', '""""""'], {}), "('TWITTER_SECRET', '')\n", (242, 264), False, 'import os\n'), ((278, 309), 'os.environ.get', 'os.environ.get', (['"""GOOGLE_ID"""', '""""""'], {}), "('GOOGLE_ID', '')\n", (292, 309), False, 'import os\n'), ((326, 361), 'os.environ.get', 'os.environ.get', (['"""GOOGLE_SECRET"""', '""""""'], {}), "('GOOGLE_SECRET', '')\n", (340, 361), False, 'import os\n')] |
from chalk import *
from colour import Color
grey = Color("#bbbbbb")
papaya = Color("#ff9700")
left_arrow = make_path([(0, 0), (1, 0)], True).reflect_x().line_width(0.03).center_xy()
def box(t):
return rectangle(1.5, 1).line_width(0.05).fill_color(papaya) + latex(t).scale(0.7)
def label(text):
return latex(text).scale(0.5).pad_b(0.4)
def arrow(text, d=True):
return label(text) // left_arrow
# Autograd 1
d = hcat([arrow(r"$f'_x(g(x))$"), box("$f$"), arrow(r"$f'_{g(x)}(g(x))$"), box("$g$"), arrow("1")], 0.2)
d.render_svg("examples/output/latex.svg", 100)
| [
"colour.Color"
] | [((54, 70), 'colour.Color', 'Color', (['"""#bbbbbb"""'], {}), "('#bbbbbb')\n", (59, 70), False, 'from colour import Color\n'), ((80, 96), 'colour.Color', 'Color', (['"""#ff9700"""'], {}), "('#ff9700')\n", (85, 96), False, 'from colour import Color\n')] |
"""
Copyright (C) 2013 Intel Corporation
?
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
?
http://www.apache.org/licenses/LICENSE-2.0
?
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
?
SPDX-License-Identifier: Apache-2.0
"""
from acs.UseCase.UseCaseBase import UseCaseBase as UCBase
class UseCaseBase(UCBase):
"""
Base class for all use case implementation
"""
def __init__(self, tc_conf, global_config):
"""
Constructor
"""
UCBase.__init__(self, tc_conf, global_config)
| [
"acs.UseCase.UseCaseBase.UseCaseBase.__init__"
] | [((859, 904), 'acs.UseCase.UseCaseBase.UseCaseBase.__init__', 'UCBase.__init__', (['self', 'tc_conf', 'global_config'], {}), '(self, tc_conf, global_config)\n', (874, 904), True, 'from acs.UseCase.UseCaseBase import UseCaseBase as UCBase\n')] |
import os
import secrets
from flask import Flask, request, redirect, url_for, send_from_directory, send_file
from werkzeug.utils import secure_filename
import subprocess
import Abbrv
UPLOAD_FOLDER = '/home/mlcruz/mysite/uploads'
DOWNLOAD_FOLDER ='/home/mlcruz/mysite/downloads'
ALLOWED_EXTENSIONS = set(['tex', 'bib'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/download/<filename>')
def download_file(filename):
return send_from_directory('/home/mlcruz/mysite/downloads',filename = "new_"+filename, attachment_filename=("new_"+filename),as_attachment=True)
@app.route('/', methods=['GET', 'POST'])
def upload_file():
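    # On POST: read the option checkboxes, save the uploaded .tex/.bib pair,
    # shell out to the formatter script, then return download links for the
    # formatted files plus the run log. On GET: render the upload form.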
if request.method == 'POST':
if request.form.get('uncited'):
checked_uncited = 'y'
else:
checked_uncited = 'n'
if request.form.get('abbreviate'):
checked_abbreviate = 'y'
else:
checked_abbreviate = 'n'
if request.form.get('check_format'):
checked_format = 'y'
else:
checked_format = 'n'
# check if the post request has the file part
if ('file_tex' or 'file_bib') not in request.files:
return redirect(request.url)
file_tex = request.files['file_tex']
file_bib = request.files['file_bib']
# if user does not select file, browser also
# submit a empty part without filename
if (file_tex.filename or file_bib.filename) == '':
return redirect(request.url)
if (file_tex and file_bib) and allowed_file(file_tex.filename) and allowed_file(file_bib.filename):
filename_tex = secure_filename(file_tex.filename)
filename_bib = secure_filename(file_bib.filename)
salt = "" #secrets.randbelow(99999999)
tex_path_string = os.path.join(app.config['UPLOAD_FOLDER'], filename_tex)
bib_path_string = os.path.join(app.config['UPLOAD_FOLDER'], filename_bib)
tex_out_string = os.path.join(DOWNLOAD_FOLDER, (str(salt) + "new_" + filename_tex ))
bib_out_string = os.path.join(DOWNLOAD_FOLDER, (str(salt) + "new_" + filename_bib))
log_out_string = tex_out_string + ".log"
file_tex.save(tex_path_string)
file_bib.save(bib_path_string)
script_path = os.path.join(os.getcwd(),"mysite/TeXArticleFormater/articleformater/menu_unixa.py")
command_string = ["{0}".format(script_path),
"--tex_path",tex_path_string,
"--bib_path",bib_path_string,
"--tex_output_name",tex_out_string,
"--bib_output_name",bib_out_string,
"--log_file_path",log_out_string,
"--remove_uncited",checked_uncited,
"--abbreviate",checked_abbreviate,
"--format_file",checked_format,
]
#mysite/TeXArticleFormater/articleformater/menu_unix.py --tex_path "mysite/TeXArticleFormater/articleformater/comite.tex" --bib_path "mysite/TeXArticleFormater/articleformater/comite.bib" --tex_output
#_name "mysite/uploads/new.tex" --bib_output_name "mysite/uploads/new.bib" --log_file_path "mysite/uploads/new.log"
subprocess.run(command_string) # doesn't capture output
log_stringer = ""
log_file = None
try:
with open(log_out_string,"r",encoding="utf8") as tex_reader:
log_file = tex_reader.readlines()
for line in log_file:
log_stringer = log_stringer + line +"<br>"
except:
log_stringer = "Error: Subprocess exited with error. Probably some weird character or some weird entry or unbalanced brackets in the bibliography is messing things up"
#send_from_directory('/home/mlcruz/mysite/downloads',filename = "new_"+filename_bib , attachment_filename=("new_"+filename_bib),as_attachment=True)
return ('''
<!doctype html>
<title>Download files</title>
<h3>Done!</h3><br>
<h2>Download Formated Files:</h2><br>
<a target="_blank" href={0}><input type="button" value="Download TeX file"/></a>
<a target="_blank" href={1}><input type="button" value="Download Bib file"/></a>
<a target="_blank" href={2}><input type="button" value="Download Log file"/></a>
<br>
<br>
<h2>LOG:</h2>
</form>'''.format(request.url+"download/{0}".format(filename_tex),request.url+"download/{0}".format(filename_bib),request.url+"download/{0}".format(filename_tex+".log")) + log_stringer)
return '''
<!doctype html>
<title>Select tex and bib files</title>
<h1>Select tex and bib files to be formated</h1>
<form method=post enctype=multipart/form-data>
<p>tex: <input type=file name=file_tex><br>
bib: <input type=file name=file_bib><br><br>
<input type="checkbox" name="check_format" value="true" checked>Format bibliography file<br>
<input type="checkbox" name="uncited" value="true"> Remove unused bibliography entries - needs format enabled<br>
<input type="checkbox" name="abbreviate" value="true"> Abbreviate serial titles<br>
<br><br><input type=submit value=Format>
</form><br><br><br><br><br><br><br><br><br><br><br><br><br><a href="https://github.com/mlcruz/TeXArticleFormater">Project page @ github</a>'''
@app.route('/abbrv', methods=['GET', 'POST'])
def abreviate():
if request.method == 'POST':
abbreviator = Abbrv.abbrv("/home/mlcruz/mysite/TeXArticleFormater/articleformater/pickle.obj")
abreviado = abbreviator.abbreviate(request.form['abbreviate'])
return '''
<!doctype html>
<title>Abbreviate</title>
<h1>Abbreviate</h1>
<form method=post enctype=multipart/form-data>
Serial title: <input type=text name=abbreviate><br><br>
Abbreviated title:<textarea disabled>{0}</textarea>
<br><br><input type=submit value=Abbreviate>
</form>'''.format(abreviado)
return '''
<!doctype html>
<title>Abbreviate</title>
<h1>Abbreviate</h1>
<form method=post enctype=multipart/form-data>
Serial title: <input type=text name=abbreviate disable><br><br>
Abbreviated title: <textarea disabled></textarea>
<br><br><input type=submit value=Abbreviate>
</form>'''
| [
"Abbrv.abbrv",
"flask.send_from_directory",
"flask.Flask",
"subprocess.run",
"os.path.join",
"flask.request.form.get",
"flask.redirect",
"os.getcwd",
"werkzeug.utils.secure_filename"
] | [((328, 343), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (333, 343), False, 'from flask import Flask, request, redirect, url_for, send_from_directory, send_file\n'), ((601, 744), 'flask.send_from_directory', 'send_from_directory', (['"""/home/mlcruz/mysite/downloads"""'], {'filename': "('new_' + filename)", 'attachment_filename': "('new_' + filename)", 'as_attachment': '(True)'}), "('/home/mlcruz/mysite/downloads', filename='new_' +\n filename, attachment_filename='new_' + filename, as_attachment=True)\n", (620, 744), False, 'from flask import Flask, request, redirect, url_for, send_from_directory, send_file\n'), ((847, 874), 'flask.request.form.get', 'request.form.get', (['"""uncited"""'], {}), "('uncited')\n", (863, 874), False, 'from flask import Flask, request, redirect, url_for, send_from_directory, send_file\n'), ((969, 999), 'flask.request.form.get', 'request.form.get', (['"""abbreviate"""'], {}), "('abbreviate')\n", (985, 999), False, 'from flask import Flask, request, redirect, url_for, send_from_directory, send_file\n'), ((1101, 1133), 'flask.request.form.get', 'request.form.get', (['"""check_format"""'], {}), "('check_format')\n", (1117, 1133), False, 'from flask import Flask, request, redirect, url_for, send_from_directory, send_file\n'), ((5649, 5734), 'Abbrv.abbrv', 'Abbrv.abbrv', (['"""/home/mlcruz/mysite/TeXArticleFormater/articleformater/pickle.obj"""'], {}), "('/home/mlcruz/mysite/TeXArticleFormater/articleformater/pickle.obj'\n )\n", (5660, 5734), False, 'import Abbrv\n'), ((1349, 1370), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (1357, 1370), False, 'from flask import Flask, request, redirect, url_for, send_from_directory, send_file\n'), ((1640, 1661), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (1648, 1661), False, 'from flask import Flask, request, redirect, url_for, send_from_directory, send_file\n'), ((1797, 1831), 'werkzeug.utils.secure_filename', 'secure_filename', (['file_tex.filename'], {}), '(file_tex.filename)\n', (1812, 1831), False, 'from werkzeug.utils import secure_filename\n'), ((1859, 1893), 'werkzeug.utils.secure_filename', 'secure_filename', (['file_bib.filename'], {}), '(file_bib.filename)\n', (1874, 1893), False, 'from werkzeug.utils import secure_filename\n'), ((1976, 2031), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'filename_tex'], {}), "(app.config['UPLOAD_FOLDER'], filename_tex)\n", (1988, 2031), False, 'import os\n'), ((2062, 2117), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'filename_bib'], {}), "(app.config['UPLOAD_FOLDER'], filename_bib)\n", (2074, 2117), False, 'import os\n'), ((3331, 3361), 'subprocess.run', 'subprocess.run', (['command_string'], {}), '(command_string)\n', (3345, 3361), False, 'import subprocess\n'), ((2493, 2504), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2502, 2504), False, 'import os\n')] |
import yaml
from redisbench_admin.run.tsbs_run_queries_redistimeseries.tsbs_run_queries_redistimeseries import (
prepare_tsbs_benchmark_command,
)
def test_prepare_tsbs_benchmark_command():
with open("./tests/test_data/tsbs-scale100-cpu-max-all-1.yml", "r") as yml_file:
benchmark_config = yaml.safe_load(yml_file)
is_remote = False
command_arr, command_str = prepare_tsbs_benchmark_command(
"tsbs_load_redistimeseries",
"localhost",
6379,
benchmark_config,
".",
"/tmp/result.json",
"/tmp/data.json",
is_remote,
)
assert (
command_str
== "tsbs_load_redistimeseries --host localhost:6379 --results-file /tmp/result.json"
)
is_remote = True
for k in benchmark_config["clientconfig"]:
if "parameters" in k:
command_arr, command_str = prepare_tsbs_benchmark_command(
"tsbs_load_redistimeseries",
"localhost",
6379,
k,
".",
"/tmp/result.json",
"/tmp/data.json",
is_remote,
)
assert (
command_str
== "tsbs_load_redistimeseries --host localhost:6379 --workers 64 --file /tmp/data.json --results-file /tmp/result.json"
)
| [
"yaml.safe_load",
"redisbench_admin.run.tsbs_run_queries_redistimeseries.tsbs_run_queries_redistimeseries.prepare_tsbs_benchmark_command"
] | [((309, 333), 'yaml.safe_load', 'yaml.safe_load', (['yml_file'], {}), '(yml_file)\n', (323, 333), False, 'import yaml\n'), ((395, 554), 'redisbench_admin.run.tsbs_run_queries_redistimeseries.tsbs_run_queries_redistimeseries.prepare_tsbs_benchmark_command', 'prepare_tsbs_benchmark_command', (['"""tsbs_load_redistimeseries"""', '"""localhost"""', '(6379)', 'benchmark_config', '"""."""', '"""/tmp/result.json"""', '"""/tmp/data.json"""', 'is_remote'], {}), "('tsbs_load_redistimeseries', 'localhost', \n 6379, benchmark_config, '.', '/tmp/result.json', '/tmp/data.json',\n is_remote)\n", (425, 554), False, 'from redisbench_admin.run.tsbs_run_queries_redistimeseries.tsbs_run_queries_redistimeseries import prepare_tsbs_benchmark_command\n'), ((954, 1094), 'redisbench_admin.run.tsbs_run_queries_redistimeseries.tsbs_run_queries_redistimeseries.prepare_tsbs_benchmark_command', 'prepare_tsbs_benchmark_command', (['"""tsbs_load_redistimeseries"""', '"""localhost"""', '(6379)', 'k', '"""."""', '"""/tmp/result.json"""', '"""/tmp/data.json"""', 'is_remote'], {}), "('tsbs_load_redistimeseries', 'localhost', \n 6379, k, '.', '/tmp/result.json', '/tmp/data.json', is_remote)\n", (984, 1094), False, 'from redisbench_admin.run.tsbs_run_queries_redistimeseries.tsbs_run_queries_redistimeseries import prepare_tsbs_benchmark_command\n')] |
import requests
from .base_hub_connection import BaseHubConnection
class AuthHubConnection(BaseHubConnection):
def __init__(self, url, protocol, token, negotiate_headers):
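        # Derive the HTTP(S) negotiate endpoint from the websocket URL, POST to it
        # with the caller's headers, then append the returned connectionId and the
        # bearer token to the hub URL before handing off to the base connection.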
self.token = token
self.negotiate_headers = negotiate_headers
negotiate_url = "https" + url[3:] if url.startswith("wss") else "http" + url[2:]
negotiate_url += "/negotiate"
response = requests.post(negotiate_url, headers=self.negotiate_headers)
data = response.json()
url = url + "?id={0}&access_token={1}".format(data["connectionId"], self.token)
super(AuthHubConnection, self).__init__(url, protocol)
| [
"requests.post"
] | [((403, 463), 'requests.post', 'requests.post', (['negotiate_url'], {'headers': 'self.negotiate_headers'}), '(negotiate_url, headers=self.negotiate_headers)\n', (416, 463), False, 'import requests\n')] |
import yaml, json
def getConfig(type: str=None, mid: int=None) -> json or str:
'''
    Fetch the configuration.
    type: which section of the config to fetch
    mid: index of the live room in liveList
'''
with open('config.yml', 'r', encoding='utf-8') as f:
config = yaml.load(f,Loader=yaml.FullLoader)
f.close()
if type == None:
config = {
'ffmpeg': {
'name': config['ffmpeg']['name'],
'outPath': config['ffmpeg']['outPath'],
'videoPath': config['ffmpeg']['videoPath']
},
'liveList': {
'name': '['+config['liveList'][mid]['name']+']',
'mid': str(config['liveList'][mid]['mid']),
'rid': str(config['liveList'][mid]['rid'])
}
}
elif type == 'SendMessage':
config = {
'ServerChanSendKey': config['SendMessage']['ServerChanSendKey'],
'DingDing': {
'accessToken': config['SendMessage']['DingDing']['accessToken'],
'secret': config['SendMessage']['DingDing']['secret']
}
}
elif type == 'logs':
config = config['logsPath']
return config
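# getConfig assumes a config.yml shaped roughly like the sketch below; the keys
# are inferred from the lookups above and the values are only placeholders.
#
# ffmpeg:
#   name: ffmpeg
#   outPath: ./out
#   videoPath: ./video
# liveList:
#   - name: example
#     mid: 0
#     rid: 0
# SendMessage:
#   ServerChanSendKey: ''
#   DingDing:
#     accessToken: ''
#     secret: ''
# logsPath: ./logs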
| [
"yaml.load"
] | [((217, 253), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (226, 253), False, 'import yaml, json\n')] |
import subprocess
import os
import shlex
import shutil
import win32file
import pywintypes
def changeFileCreateTime(path, ctime):
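    # Overwrites the file's creation timestamp through the Win32 API so the test
    # can fake files "created" in different years (ctime is a Unix epoch in seconds).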
handle = win32file.CreateFile(path, win32file.GENERIC_WRITE , 0 , None , win32file.OPEN_EXISTING , 0 , 0)
PyTime = pywintypes.Time(ctime)
win32file.SetFileTime(handle,PyTime)
path = "D:/Test"
dir = os.path.join(path, '2022')
def makeFiles():
"""
├── D:/
│ ├── Test/
│── file.txt 4/30/2022
│── file (1).txt 4/30/2021
│── 2022
│── file.txt 4/30/2022
"""
os.mkdir(path)
with open(os.path.join(path, 'file.txt'), mode='a'): pass
with open(os.path.join(path, 'file (1).txt'), mode='a'): pass
changeFileCreateTime(os.path.join(path, 'file.txt'), 1651320922)
changeFileCreateTime(os.path.join(path, 'file (1).txt'), 1619784922)
os.mkdir(dir)
with open(os.path.join(dir, 'file.txt'), mode='a'): pass
changeFileCreateTime(os.path.join(dir, 'file.txt'), 1651320922)
def year():
cmd = "Categorize year --path D:/Test --verbose"
cmdList=shlex.split(cmd)
try:
subprocess.run(cmdList, check=True, capture_output=True, text=True, shell=True)
except subprocess.CalledProcessError as error:
print(error.stdout)
print(error.stderr)
raise error
def check_files():
"""
expected
├── D:/
│ ├── Test/
│── 2022
│── file.txt 4/30/2022
│── file.txt (1) 4/30/2022
│── 2021
│── file (1).txt 4/30/2021
"""
files = []
expected = ['2021', '2022']
for file in os.listdir(path):
files.append(file)
assert check_if_equal(files, expected)
def check_if_equal(list_1, list_2):
if len(list_1) != len(list_2):
return False
return sorted(list_1) == sorted(list_2)
def test_main():
makeFiles()
year()
check_files()
shutil.rmtree(path)
if __name__ == "__main__":
test_main()
pass
| [
"os.listdir",
"win32file.CreateFile",
"shlex.split",
"subprocess.run",
"os.path.join",
"os.mkdir",
"shutil.rmtree",
"win32file.SetFileTime",
"pywintypes.Time"
] | [((341, 367), 'os.path.join', 'os.path.join', (['path', '"""2022"""'], {}), "(path, '2022')\n", (353, 367), False, 'import os\n'), ((143, 239), 'win32file.CreateFile', 'win32file.CreateFile', (['path', 'win32file.GENERIC_WRITE', '(0)', 'None', 'win32file.OPEN_EXISTING', '(0)', '(0)'], {}), '(path, win32file.GENERIC_WRITE, 0, None, win32file.\n OPEN_EXISTING, 0, 0)\n', (163, 239), False, 'import win32file\n'), ((253, 275), 'pywintypes.Time', 'pywintypes.Time', (['ctime'], {}), '(ctime)\n', (268, 275), False, 'import pywintypes\n'), ((280, 317), 'win32file.SetFileTime', 'win32file.SetFileTime', (['handle', 'PyTime'], {}), '(handle, PyTime)\n', (301, 317), False, 'import win32file\n'), ((570, 584), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (578, 584), False, 'import os\n'), ((859, 872), 'os.mkdir', 'os.mkdir', (['dir'], {}), '(dir)\n', (867, 872), False, 'import os\n'), ((1080, 1096), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (1091, 1096), False, 'import shlex\n'), ((1630, 1646), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1640, 1646), False, 'import os\n'), ((1922, 1941), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (1935, 1941), False, 'import shutil\n'), ((738, 768), 'os.path.join', 'os.path.join', (['path', '"""file.txt"""'], {}), "(path, 'file.txt')\n", (750, 768), False, 'import os\n'), ((807, 841), 'os.path.join', 'os.path.join', (['path', '"""file (1).txt"""'], {}), "(path, 'file (1).txt')\n", (819, 841), False, 'import os\n'), ((959, 988), 'os.path.join', 'os.path.join', (['dir', '"""file.txt"""'], {}), "(dir, 'file.txt')\n", (971, 988), False, 'import os\n'), ((1114, 1193), 'subprocess.run', 'subprocess.run', (['cmdList'], {'check': '(True)', 'capture_output': '(True)', 'text': '(True)', 'shell': '(True)'}), '(cmdList, check=True, capture_output=True, text=True, shell=True)\n', (1128, 1193), False, 'import subprocess\n'), ((599, 629), 'os.path.join', 'os.path.join', (['path', '"""file.txt"""'], {}), "(path, 'file.txt')\n", (611, 629), False, 'import os\n'), ((661, 695), 'os.path.join', 'os.path.join', (['path', '"""file (1).txt"""'], {}), "(path, 'file (1).txt')\n", (673, 695), False, 'import os\n'), ((887, 916), 'os.path.join', 'os.path.join', (['dir', '"""file.txt"""'], {}), "(dir, 'file.txt')\n", (899, 916), False, 'import os\n')] |
"""Test Cases"""
from dataStructures.data_types_bst import BinarySearchTree
from dataStructures.bst_from_sorted_array import bst_from_sorted_array
def test_bst_from_sorted_array():
l = [1, 2, 3, 4, 5, 6, 7, 8, 9]
root = bst_from_sorted_array(l)
tree = BinarySearchTree(root)
assert [node.key for node in tree] == [1, 2, 3, 4, 5, 6, 7, 8, 9]
| [
"dataStructures.bst_from_sorted_array.bst_from_sorted_array",
"dataStructures.data_types_bst.BinarySearchTree"
] | [((232, 256), 'dataStructures.bst_from_sorted_array.bst_from_sorted_array', 'bst_from_sorted_array', (['l'], {}), '(l)\n', (253, 256), False, 'from dataStructures.bst_from_sorted_array import bst_from_sorted_array\n'), ((268, 290), 'dataStructures.data_types_bst.BinarySearchTree', 'BinarySearchTree', (['root'], {}), '(root)\n', (284, 290), False, 'from dataStructures.data_types_bst import BinarySearchTree\n')] |
from context import HS
from helpers import timedBoardSolve, timedBoardDiffSolve
for diff in range(4):
try:
timedBoardSolve(HS(diff, 11), 10)
except:
print(f"Failed difficulty {diff}")
# Times vary a lot w/in difficulty!
for diff in range(4):
try:
timedBoardDiffSolve(HS, diff, 10)
except:
print(f"Failed difficulty {diff}")
| [
"context.HS",
"helpers.timedBoardDiffSolve"
] | [((285, 318), 'helpers.timedBoardDiffSolve', 'timedBoardDiffSolve', (['HS', 'diff', '(10)'], {}), '(HS, diff, 10)\n', (304, 318), False, 'from helpers import timedBoardSolve, timedBoardDiffSolve\n'), ((136, 148), 'context.HS', 'HS', (['diff', '(11)'], {}), '(diff, 11)\n', (138, 148), False, 'from context import HS\n')] |
import numpy as np
from rubin_sim.utils import Site, _approx_altAz2RaDec, _approx_altaz2pa, _approx_RaDec2AltAz
from rubin_sim.scheduler.utils import smallest_signed_angle
__all__ = ["Kinem_model"]
TwoPi = 2.*np.pi
class radec2altazpa(object):
"""Class to make it easy to swap in different alt/az conversion if wanted
"""
def __init__(self, location):
self.location = location
def __call__(self, ra, dec, mjd):
alt, az, pa = _approx_RaDec2AltAz(ra, dec, self.location.lat_rad, self.location.lon_rad, mjd,
return_pa=True)
return alt, az, pa
def _getRotSkyPos(paRad, rotTelRad):
"""
    Parameters
----------
paRad : float or array
The parallactic angle
"""
return (rotTelRad - paRad) % TwoPi
def _getRotTelPos(paRad, rotSkyRad):
"""Make it run from -180 to 180
"""
result = (rotSkyRad + paRad) % TwoPi
return result
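# The two helpers above are inverses of each other (modulo 2*pi). As a quick
# made-up check: with pa = 30 deg and rotTelPos = 50 deg, _getRotSkyPos gives
# rotSkyPos = 20 deg, and feeding those 20 deg back through _getRotTelPos
# returns the original 50 deg.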
class Kinem_model(object):
"""
A Kinematic model of the telescope.
Parameters
----------
location : `astropy.coordinates.EarthLocation`
The location of the telescope. If None, defaults to rubin_sim.utils.Site info
park_alt : `float` (86.5)
The altitude the telescope gets parked at (degrees)
park_az : `float` (0)
The azimuth for telescope park position (degrees)
start_filter : `str` ('r')
The filter that gets loaded when the telescope is parked
mjd0 : `float` (0)
The MJD to assume we are starting from
Note there are additional parameters in the methods setup_camera, setup_dome, setup_telescope,
and setup_optics. Just breaking it up a bit to make it more readable.
"""
def __init__(self, location=None, park_alt=86.5, park_az=0., start_filter='r', mjd0=0):
self.park_alt_rad = np.radians(park_alt)
self.park_az_rad = np.radians(park_az)
self.current_filter = start_filter
if location is None:
self.location = Site('LSST')
self.location.lat_rad = np.radians(self.location.latitude)
self.location.lon_rad = np.radians(self.location.longitude)
# Our RA,Dec to Alt,Az converter
self.radec2altaz = radec2altazpa(self.location)
self.setup_camera()
self.setup_dome()
self.setup_telescope()
self.setup_optics()
# Park the telescope
self.park()
self.last_mjd = mjd0
def mount_filters(self, filter_list):
"""Change which filters are mounted
Parameters
----------
filter_list : `list` [`str`]
List of the mounted filters.
"""
self.mounted_filters = filter_list
def setup_camera(self, readtime=2., shuttertime=1., filter_changetime=120., fov=3.5,
rotator_min=-90, rotator_max=90, maxspeed=3.5, accel=1.0, decel=1.0):
"""
Parameters
----------
readtime : `float` (2)
The readout time of the CCDs (seconds)
shuttertime : `float` (1.)
The time it takes the shutter to go from closed to fully open (seconds)
filter_changetime : `float` (120)
The time it takes to change filters (seconds)
fov : `float` (3.5)
The camera field of view (degrees)
rotator_min : `float` (-90)
The minimum angle the camera rotator can move to (degrees)
rotator_max : `float` (90)
The maximum angle the camera rotator can move to (degrees)
maxspeed : `float` (3.5)
The maximum speed of the rotator (degrees/s)
accel : `float` (1.0)
The acceleration of the rotator (degrees/s^2)
"""
self.readtime = readtime
self.shuttertime = shuttertime
self.filter_changetime = filter_changetime
self.camera_fov = np.radians(fov)
self.telrot_minpos_rad = np.radians(rotator_min)
self.telrot_maxpos_rad = np.radians(rotator_max)
self.telrot_maxspeed_rad = np.radians(maxspeed)
self.telrot_accel_rad = np.radians(accel)
self.telrot_decel_rad = np.radians(decel)
self.mounted_filters = ['u', 'g', 'r', 'i', 'y']
def setup_dome(self, altitude_maxspeed=1.75, altitude_accel=0.875, altitude_decel=0.875,
altitude_freerange=0., azimuth_maxspeed=1.5, azimuth_accel=0.75,
azimuth_decel=0.75, azimuth_freerange=4.0, settle_time=1.0):
"""Parameters to define the DOME movement.
Parameters
----------
altitude_maxspeed : `float` (1.75)
Maximum speed for altitude movement (degrees/second)
altitude_accel : `float` (0.875)
Maximum acceleration for altitude movement (degrees/second**2)
altitude_decel : `float` (0.875)
Maximum deceleration for altitude movement (degrees/second**2)
altitude_freerange : `float` (0)
The range over which there is 0 delay
azimuth_maxspeed : `float` (1.5)
Maximum speed for azimuth movement (degrees/second)
azimuth_accel : `float` (0.75)
Maximum acceleration for azimuth movement (degrees/second**2)
azimuth_decel : `float` (0.75)
Maximum deceleration for azimuth movement (degrees/second**2)
azimuth_freerange : `float` (4.0)
The range in which there is 0 delay
settle_time : `float` (1.0)
Settle time after movement (seconds)
"""
self.domalt_maxspeed_rad = np.radians(altitude_maxspeed)
self.domalt_accel_rad = np.radians(altitude_accel)
self.domalt_decel_rad = np.radians(altitude_decel)
self.domalt_free_range = np.radians(altitude_freerange)
self.domaz_maxspeed_rad = np.radians(azimuth_maxspeed)
self.domaz_accel_rad = np.radians(azimuth_accel)
self.domaz_decel_rad = np.radians(azimuth_decel)
self.domaz_free_range = np.radians(azimuth_freerange)
self.domaz_settletime = settle_time
def setup_telescope(self, altitude_minpos=20.0, altitude_maxpos=86.5,
azimuth_minpos=-270.0, azimuth_maxpos=270.0, altitude_maxspeed=3.5,
altitude_accel=3.5, altitude_decel=3.5, azimuth_maxspeed=7.0,
azimuth_accel=7.0, azimuth_decel=7.0, settle_time=3.0):
"""Parameters to define the TELESCOPE movement and position.
Parameters
----------
altitude_minpos : `float` (20.0)
Minimum altitude for the telescope (degrees)
altitude_maxpos : `float` (86.5)
Maximum altitude for the telescope (degrees)
azimuth_minpos : `float` (-270.0)
Minimum azimuth position (degrees)
azimuth_maxpos : `float` (270.0)
Maximum azimuth position (degrees)
altitude_maxspeed : `float` (3.5)
Maximum speed for altitude movement (degrees/second)
altitude_accel : `float` (3.5)
Maximum acceleration for altitude movement (degrees/second**2)
altitude_decel : `float` (3.5)
Maximum deceleration for altitude movement (degrees/second**2)
azimuth_maxspeed : `float` (7.0)
Maximum speed for azimuth movement (degrees/second)
azimuth_accel : `float` (7.0)
Maximum acceleration for azimuth movement (degrees/second**2)
azimuth_decel : `float` (7.0)
Maximum deceleration for azimuth movement (degrees/second**2)
settle_time : `float` (3.0)
Settle time required for telescope after movement (seconds)
"""
self.telalt_minpos_rad = np.radians(altitude_minpos)
self.telalt_maxpos_rad = np.radians(altitude_maxpos)
self.telaz_minpos_rad = np.radians(azimuth_minpos)
self.telaz_maxpos_rad = np.radians(azimuth_maxpos)
self.telalt_maxspeed_rad = np.radians(altitude_maxspeed)
self.telalt_accel_rad = np.radians(altitude_accel)
self.telalt_decel_rad = np.radians(altitude_decel)
self.telaz_maxspeed_rad = np.radians(azimuth_maxspeed)
self.telaz_accel_rad = np.radians(azimuth_accel)
self.telaz_decel_rad = np.radians(azimuth_decel)
self.mount_settletime = settle_time
def setup_optics(self, ol_slope=1.0/3.5, cl_delay=[0.0, 36.], cl_altlimit=[0.0, 9.0, 90.0]):
"""
Parameters
----------
ol_slope : `float` (1.0/3.5)
seconds/degree in altitude slew.
cl_delay : list ([0.0, 36])
The delays for closed optics loops (seconds)
cl_altlimit : list ([0.0, 9.0, 90.0])
            The altitude limits (degrees) for performing closed optics loops.
Should be one element longer than cl_delay.
A given movement in altitude will cover X degrees; if X > cl_altlimit[i] there is
an additional delay of cl_delay[i]
"""
        self.optics_ol_slope = ol_slope/np.radians(1.)  # note, 1./np.radians(1) = 180/np.pi
self.optics_cl_delay = cl_delay
self.optics_cl_altlimit = np.radians(cl_altlimit)
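        # With the defaults above, the closed-loop correction in slew_times() only
        # looks at the second entry of each list: an altitude move of at least
        # cl_altlimit[1] (9 degrees) adds cl_delay[1] (36 seconds), smaller moves add nothing.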
def park(self):
"""Put the telescope in the park position.
"""
# I'm going to ignore that the old model had the dome altitude at 90
# and telescope altitude 86 for park.
# We should usually be dome az limited anyway, so this should be a negligible approximation.
self.parked = True
# We have no current position we are tracking
self.current_RA_rad = None
self.current_dec_rad = None
self.current_rotSkyPos_rad = None
self.cumulative_azimuth_rad = 0
# The last position we were at (or the current if we are parked)
self.last_az_rad = self.park_az_rad
self.last_alt_rad = self.park_alt_rad
self.last_rot_tel_pos_rad = 0
def current_alt_az(self, mjd):
"""return the current alt az position that we have tracked to.
"""
if self.parked:
return self.last_alt_rad, self.last_az_rad, self.last_rot_tel_pos_rad
else:
alt_rad, az_rad, pa = self.radec2altaz(self.current_RA_rad, self.current_dec_rad, mjd)
rotTelPos = _getRotTelPos(pa, self.last_rot_tel_pos_rad)
return alt_rad, az_rad, rotTelPos
def _uamSlewTime(self, distance, vmax, accel):
"""Compute slew time delay assuming uniform acceleration (for any component).
If you accelerate uniformly to vmax, then slow down uniformly to zero, distance traveled is
d = vmax**2 / accel
To travel distance d while accelerating/decelerating at rate a, time required is t = 2 * sqrt(d / a)
If hit vmax, then time to acceleration to/from vmax is 2*vmax/a and distance in those
steps is vmax**2/a. The remaining distance is (d - vmax^2/a) and time needed is (d - vmax^2/a)/vmax
This method accepts arrays of distance, and assumes acceleration == deceleration.
Parameters
----------
distance : numpy.ndarray
Distances to travel. Must be positive value.
vmax : float
Max velocity
accel : float
Acceleration (and deceleration)
Returns
-------
numpy.ndarray
"""
dm = vmax**2 / accel
slewTime = np.where(distance < dm, 2 * np.sqrt(distance / accel),
2 * vmax / accel + (distance - dm) / vmax)
return slewTime
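    # A worked example of the formula above with made-up numbers (any consistent
    # units work; the model itself passes radians): with vmax = 3.5 and accel = 3.5
    # the crossover distance is dm = vmax**2 / accel = 3.5, so a move of 2 stays
    # below vmax and takes 2 * sqrt(2 / 3.5) ~= 1.51 s, while a move of 10 takes
    # 2 * vmax / accel + (10 - dm) / vmax = 2.0 + 1.86 ~= 3.86 s.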
def slew_times(self, ra_rad, dec_rad, mjd, rotSkyPos=None, rotTelPos=None, filtername='r',
lax_dome=True, alt_rad=None, az_rad=None, starting_alt_rad=None, starting_az_rad=None,
starting_rotTelPos_rad=None, update_tracking=False, include_readtime=True):
"""Calculates ``slew'' time to a series of alt/az/filter positions from the current
position (stored internally).
Assumptions (currently):
Assumes we have been tracking on ra,dec,rotSkyPos position.
Ignores the motion of the sky while we are slewing
(this approx should probably average out over time).
No checks for if we have tracked beyond limits.
(this assumes folks put telescope in park if there's a long gap.)
Assumes the camera rotator never needs to (or can't) do a slew over 180 degrees.
Calculates the ``slew'' time necessary to get from current state
to alt2/az2/filter2. The time returned is actually the time between
the end of an exposure at current location and the beginning of an exposure
at alt2/az2, since it includes readout time in the ``slew'' time.
Parameters
----------
ra_rad : `np.ndarray`
The RA(s) of the location(s) we wish to slew to (radians)
dec_rad : `np.ndarray`
The declination(s) of the location(s) we wish to slew to (radians)
mjd : `float`
            The current modified julian date (days)
rotSkyPos : `np.ndarray`
The desired rotSkyPos(s) (radians). Angle between up on the chip and North. Note,
it is possible to set a rotSkyPos outside the allowed camera rotator range, in which case
the slewtime will be np.inf. If both rotSkyPos and rotTelPos are set, rotTelPos will be used.
rotTelPos : `np.ndarray`
The desired rotTelPos(s) (radians).
filtername : `str`
The filter(s) of the desired observations.
Set to None to compute only telescope and dome motion times.
alt_rad : `np.ndarray`
The altitude(s) of the destination pointing(s) (radians).
Will override ra_rad,dec_rad if provided.
az_rad : `np.ndarray`
The azimuth(s) of the destination pointing(s) (radians).
Will override ra_rad,dec_rad if provided.
lax_dome : `bool`, default True
If True, allow the dome to creep, model a dome slit, and don't
require the dome to settle in azimuth. If False, adhere to the way
SOCS calculates slew times (as of June 21 2017) and do not allow dome creep.
starting_alt_rad : `float` (None)
The starting altitude for the slew (radians).
If None, will use internally stored last pointing.
starting_az_rad : `float` (None)
The starting azimuth for the slew (radians).
If None, will use internally stored last pointing.
starting_rotTelPos_rad : `float` (None)
The starting camera rotation for the slew (radians).
If None, will use internally stored last pointing.
update_tracking : `bool` (False)
If True, update the internal attributes to say we are tracking the
specified RA,Dec,RotSkyPos position.
include_readtime : `bool` (True)
Assume the camera must be read before opening the shutter,
and include that readtime in the returned slewtime.
Readtime will never be included if the telescope was parked before the slew.
Returns
-------
slewTime : `np.ndarray`
The number of seconds between the two specified exposures.
Will be np.nan or np.inf if slew is not possible.
"""
if filtername not in self.mounted_filters:
return np.nan
# Don't trust folks to do pa calculation correctly, if both rotations set, rotSkyPos wins
if (rotTelPos is not None) & (rotSkyPos is not None):
if np.isfinite(rotTelPos):
rotSkyPos = None
else:
rotTelPos = None
# alt,az not provided, calculate from RA,Dec
if alt_rad is None:
alt_rad, az_rad, pa = self.radec2altaz(ra_rad, dec_rad, mjd)
else:
pa = _approx_altaz2pa(alt_rad, az_rad, self.location.lat_rad)
if update_tracking:
ra_rad, dec_rad = _approx_altAz2RaDec(alt_rad, az_rad, self.location.lat_rad,
self.location.lon_rad, mjd)
if starting_alt_rad is None:
if self.parked:
starting_alt_rad = self.park_alt_rad
starting_az_rad = self.park_az_rad
else:
starting_alt_rad, starting_az_rad, starting_pa = self.radec2altaz(self.current_RA_rad,
self.current_dec_rad, mjd)
deltaAlt = np.abs(alt_rad - starting_alt_rad)
delta_az_short = smallest_signed_angle(starting_az_rad, az_rad)
delta_az_long = delta_az_short - TwoPi
daslz = np.where(delta_az_short < 0)[0]
delta_az_long[daslz] = TwoPi + delta_az_short[daslz]
azlz = np.where(delta_az_short < 0)[0]
delta_az_long[azlz] = TwoPi + delta_az_short[azlz]
# So, for every position, we can get there by slewing long or short way
cummulative_az_short = delta_az_short + self.cumulative_azimuth_rad
oob = np.where((cummulative_az_short < self.telaz_minpos_rad) |
(cummulative_az_short > self.telaz_maxpos_rad))[0]
# Set out of bounds azimuths to infinite distance
delta_az_short[oob] = np.inf
cummulative_az_long = delta_az_long + self.cumulative_azimuth_rad
oob = np.where((cummulative_az_long < self.telaz_minpos_rad) |
(cummulative_az_long > self.telaz_maxpos_rad))[0]
delta_az_long[oob] = np.inf
# Taking minimum of abs, so only possible azimuths slews should show up.
# And deltaAz is signed properly.
stacked_az = np.vstack([delta_az_short, delta_az_long])
indx = np.argmin(np.abs(stacked_az), axis=0)
deltaAztel = np.take_along_axis(stacked_az, np.expand_dims(indx, axis=0), axis=0).squeeze(axis=0)
# Calculate how long the telescope will take to slew to this position.
telAltSlewTime = self._uamSlewTime(deltaAlt, self.telalt_maxspeed_rad,
self.telalt_accel_rad)
telAzSlewTime = self._uamSlewTime(np.abs(deltaAztel), self.telaz_maxspeed_rad,
self.telaz_accel_rad)
totTelTime = np.maximum(telAltSlewTime, telAzSlewTime)
# Time for open loop optics correction
olTime = deltaAlt / self.optics_ol_slope
totTelTime += olTime
# Add time for telescope settle.
# note, this means we're going to have a settle time even for very small slews like dithering.
settleAndOL = np.where(totTelTime > 0)
totTelTime[settleAndOL] += np.maximum(0, self.mount_settletime - olTime[settleAndOL])
# And readout puts a floor on tel time
if include_readtime:
totTelTime = np.maximum(self.readtime, totTelTime)
# now compute dome slew time
# the dome can spin all the way around, so we will let it go the shortest angle,
# even if the telescope has to unwind
deltaAz = np.abs(smallest_signed_angle(starting_az_rad, az_rad))
if lax_dome:
# model dome creep, dome slit, and no azimuth settle
# if we can fit both exposures in the dome slit, do so
sameDome = np.where(deltaAlt ** 2 + deltaAz ** 2 < self.camera_fov ** 2)
# else, we take the minimum time from two options:
# 1. assume we line up alt in the center of the dome slit so we
# minimize distance we have to travel in azimuth.
# 2. line up az in the center of the slit
# also assume:
# * that we start out going maxspeed for both alt and az
# * that we only just barely have to get the new field in the
# dome slit in one direction, but that we have to center the
# field in the other (which depends which of the two options used)
# * that we don't have to slow down until after the shutter
# starts opening
domDeltaAlt = deltaAlt
# on each side, we can start out with the dome shifted away from
# the center of the field by an amount domSlitRadius - fovRadius
domSlitDiam = self.camera_fov / 2.0
domDeltaAz = deltaAz - 2 * (domSlitDiam / 2 - self.camera_fov / 2)
domAltSlewTime = domDeltaAlt / self.domalt_maxspeed_rad
domAzSlewTime = domDeltaAz / self.domaz_maxspeed_rad
totDomTime1 = np.maximum(domAltSlewTime, domAzSlewTime)
domDeltaAlt = deltaAlt - 2 * (domSlitDiam / 2 - self.camera_fov / 2)
domDeltaAz = deltaAz
domAltSlewTime = domDeltaAlt / self.domalt_maxspeed_rad
domAzSlewTime = domDeltaAz / self.domaz_maxspeed_rad
totDomTime2 = np.maximum(domAltSlewTime, domAzSlewTime)
totDomTime = np.minimum(totDomTime1, totDomTime2)
totDomTime[sameDome] = 0
else:
# the above models a dome slit and dome creep. However, it appears that
# SOCS requires the dome to slew exactly to each field and settle in az
domAltSlewTime = self._uamSlewTime(deltaAlt, self.domalt_maxspeed_rad,
self.domalt_accel_rad)
domAzSlewTime = self._uamSlewTime(deltaAz, self.domaz_maxspeed_rad,
self.domaz_accel_rad)
# Dome takes 1 second to settle in az
domAzSlewTime = np.where(domAzSlewTime > 0,
domAzSlewTime + self.domaz_settletime,
domAzSlewTime)
totDomTime = np.maximum(domAltSlewTime, domAzSlewTime)
# Find the max of the above for slew time.
slewTime = np.maximum(totTelTime, totDomTime)
# include filter change time if necessary
filterChange = np.where(filtername != self.current_filter)
slewTime[filterChange] = np.maximum(slewTime[filterChange],
self.filter_changetime)
# Add closed loop optics correction
# Find the limit where we must add the delay
cl_limit = self.optics_cl_altlimit[1]
cl_delay = self.optics_cl_delay[1]
closeLoop = np.where(deltaAlt >= cl_limit)
slewTime[closeLoop] += cl_delay
# Mask min/max altitude limits so slewtime = np.nan
outsideLimits = np.where((alt_rad > self.telalt_maxpos_rad) |
(alt_rad < self.telalt_minpos_rad))[0]
slewTime[outsideLimits] = np.nan
# If we want to include the camera rotation time
if (rotSkyPos is not None) | (rotTelPos is not None):
if rotTelPos is None:
rotTelPos = _getRotTelPos(pa, rotSkyPos)
if rotSkyPos is None:
rotSkyPos = _getRotSkyPos(pa, rotTelPos)
# If the new rotation angle would move us out of the limits, return nan
rotTelPos_ranged = rotTelPos+0
over = np.where(rotTelPos > np.pi)[0]
rotTelPos_ranged[over] -= TwoPi
if (rotTelPos_ranged < self.telrot_minpos_rad) | (rotTelPos_ranged > self.telrot_maxpos_rad):
return np.nan
# If there was no kwarg for starting rotator position
if starting_rotTelPos_rad is None:
# If there is no current rotSkyPos, we were parked
if self.current_rotSkyPos_rad is None:
current_rotTelPos = self.last_rot_tel_pos_rad
else:
# We have been tracking, so rotTelPos needs to be updated
current_rotTelPos = _getRotTelPos(pa, self.current_rotSkyPos_rad)
else:
# kwarg overrides if it was supplied
current_rotTelPos = starting_rotTelPos_rad
deltaRotation = np.abs(smallest_signed_angle(current_rotTelPos, rotTelPos))
rotator_time = self._uamSlewTime(deltaRotation, self.telrot_maxspeed_rad, self.telrot_accel_rad)
slewTime = np.maximum(slewTime, rotator_time)
# Update the internal attributes to note that we are now pointing and tracking
# at the requested RA,Dec,rotSkyPos
if update_tracking:
self.current_RA_rad = ra_rad
self.current_dec_rad = dec_rad
self.current_rotSkyPos_rad = rotSkyPos
self.parked = False
# Handy to keep as reference, but not used for any calculations
self.last_rot_tel_pos_rad = rotTelPos
self.last_az_rad = az_rad
self.last_alt_rad = alt_rad
self.last_pa_rad = pa
# Track the cumulative azimuth
self.cumulative_azimuth_rad += deltaAztel
self.current_filter = filtername
self.last_mjd = mjd
return slewTime
def visit_time(self, observation):
# How long does it take to make an observation. Assume final read can be done during next slew.
visit_time = observation['exptime'] + \
observation['nexp'] * self.shuttertime + \
max(observation['nexp'] - 1, 0) * self.readtime
return visit_time
def observe(self, observation, mjd, rotTelPos=None, lax_dome=True):
"""observe a target, and return the slewtime and visit time for the action
If slew is not allowed, returns np.nan and does not update state.
"""
slewtime = self.slew_times(observation['RA'], observation['dec'],
mjd, rotSkyPos=observation['rotSkyPos'],
rotTelPos=rotTelPos,
filtername=observation['filter'], update_tracking=True,
lax_dome=lax_dome)
visit_time = self.visit_time(observation)
return slewtime, visit_time
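# A minimal usage sketch (the values below are placeholders, not from the
# original code): the model parks itself on construction, so a first call like
#
#   model = Kinem_model(mjd0=59853.0)
#   slewtime = model.slew_times(np.radians([30.0]), np.radians([-20.0]), 59853.2,
#                               rotSkyPos=np.radians([15.0]), filtername='r',
#                               update_tracking=True)
#
# measures the slew from the park position, and later calls measure slews from
# whatever RA/Dec/rotSkyPos the model is now tracking; observe() wraps the same
# call and also returns the visit time for an observation record.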
| [
"numpy.radians",
"numpy.abs",
"rubin_sim.utils._approx_RaDec2AltAz",
"numpy.sqrt",
"numpy.minimum",
"numpy.where",
"rubin_sim.utils._approx_altaz2pa",
"rubin_sim.utils.Site",
"numpy.isfinite",
"numpy.vstack",
"numpy.expand_dims",
"numpy.maximum",
"rubin_sim.scheduler.utils.smallest_signed_angle",
"rubin_sim.utils._approx_altAz2RaDec"
] | [((461, 560), 'rubin_sim.utils._approx_RaDec2AltAz', '_approx_RaDec2AltAz', (['ra', 'dec', 'self.location.lat_rad', 'self.location.lon_rad', 'mjd'], {'return_pa': '(True)'}), '(ra, dec, self.location.lat_rad, self.location.lon_rad,\n mjd, return_pa=True)\n', (480, 560), False, 'from rubin_sim.utils import Site, _approx_altAz2RaDec, _approx_altaz2pa, _approx_RaDec2AltAz\n'), ((1836, 1856), 'numpy.radians', 'np.radians', (['park_alt'], {}), '(park_alt)\n', (1846, 1856), True, 'import numpy as np\n'), ((1884, 1903), 'numpy.radians', 'np.radians', (['park_az'], {}), '(park_az)\n', (1894, 1903), True, 'import numpy as np\n'), ((3868, 3883), 'numpy.radians', 'np.radians', (['fov'], {}), '(fov)\n', (3878, 3883), True, 'import numpy as np\n'), ((3918, 3941), 'numpy.radians', 'np.radians', (['rotator_min'], {}), '(rotator_min)\n', (3928, 3941), True, 'import numpy as np\n'), ((3975, 3998), 'numpy.radians', 'np.radians', (['rotator_max'], {}), '(rotator_max)\n', (3985, 3998), True, 'import numpy as np\n'), ((4034, 4054), 'numpy.radians', 'np.radians', (['maxspeed'], {}), '(maxspeed)\n', (4044, 4054), True, 'import numpy as np\n'), ((4087, 4104), 'numpy.radians', 'np.radians', (['accel'], {}), '(accel)\n', (4097, 4104), True, 'import numpy as np\n'), ((4137, 4154), 'numpy.radians', 'np.radians', (['decel'], {}), '(decel)\n', (4147, 4154), True, 'import numpy as np\n'), ((5544, 5573), 'numpy.radians', 'np.radians', (['altitude_maxspeed'], {}), '(altitude_maxspeed)\n', (5554, 5573), True, 'import numpy as np\n'), ((5606, 5632), 'numpy.radians', 'np.radians', (['altitude_accel'], {}), '(altitude_accel)\n', (5616, 5632), True, 'import numpy as np\n'), ((5665, 5691), 'numpy.radians', 'np.radians', (['altitude_decel'], {}), '(altitude_decel)\n', (5675, 5691), True, 'import numpy as np\n'), ((5725, 5755), 'numpy.radians', 'np.radians', (['altitude_freerange'], {}), '(altitude_freerange)\n', (5735, 5755), True, 'import numpy as np\n'), ((5790, 5818), 'numpy.radians', 'np.radians', (['azimuth_maxspeed'], {}), '(azimuth_maxspeed)\n', (5800, 5818), True, 'import numpy as np\n'), ((5850, 5875), 'numpy.radians', 'np.radians', (['azimuth_accel'], {}), '(azimuth_accel)\n', (5860, 5875), True, 'import numpy as np\n'), ((5907, 5932), 'numpy.radians', 'np.radians', (['azimuth_decel'], {}), '(azimuth_decel)\n', (5917, 5932), True, 'import numpy as np\n'), ((5965, 5994), 'numpy.radians', 'np.radians', (['azimuth_freerange'], {}), '(azimuth_freerange)\n', (5975, 5994), True, 'import numpy as np\n'), ((7670, 7697), 'numpy.radians', 'np.radians', (['altitude_minpos'], {}), '(altitude_minpos)\n', (7680, 7697), True, 'import numpy as np\n'), ((7731, 7758), 'numpy.radians', 'np.radians', (['altitude_maxpos'], {}), '(altitude_maxpos)\n', (7741, 7758), True, 'import numpy as np\n'), ((7791, 7817), 'numpy.radians', 'np.radians', (['azimuth_minpos'], {}), '(azimuth_minpos)\n', (7801, 7817), True, 'import numpy as np\n'), ((7850, 7876), 'numpy.radians', 'np.radians', (['azimuth_maxpos'], {}), '(azimuth_maxpos)\n', (7860, 7876), True, 'import numpy as np\n'), ((7912, 7941), 'numpy.radians', 'np.radians', (['altitude_maxspeed'], {}), '(altitude_maxspeed)\n', (7922, 7941), True, 'import numpy as np\n'), ((7974, 8000), 'numpy.radians', 'np.radians', (['altitude_accel'], {}), '(altitude_accel)\n', (7984, 8000), True, 'import numpy as np\n'), ((8033, 8059), 'numpy.radians', 'np.radians', (['altitude_decel'], {}), '(altitude_decel)\n', (8043, 8059), True, 'import numpy as np\n'), ((8094, 8122), 'numpy.radians', 'np.radians', 
(['azimuth_maxspeed'], {}), '(azimuth_maxspeed)\n', (8104, 8122), True, 'import numpy as np\n'), ((8154, 8179), 'numpy.radians', 'np.radians', (['azimuth_accel'], {}), '(azimuth_accel)\n', (8164, 8179), True, 'import numpy as np\n'), ((8211, 8236), 'numpy.radians', 'np.radians', (['azimuth_decel'], {}), '(azimuth_decel)\n', (8221, 8236), True, 'import numpy as np\n'), ((9093, 9116), 'numpy.radians', 'np.radians', (['cl_altlimit'], {}), '(cl_altlimit)\n', (9103, 9116), True, 'import numpy as np\n'), ((16528, 16562), 'numpy.abs', 'np.abs', (['(alt_rad - starting_alt_rad)'], {}), '(alt_rad - starting_alt_rad)\n', (16534, 16562), True, 'import numpy as np\n'), ((16588, 16634), 'rubin_sim.scheduler.utils.smallest_signed_angle', 'smallest_signed_angle', (['starting_az_rad', 'az_rad'], {}), '(starting_az_rad, az_rad)\n', (16609, 16634), False, 'from rubin_sim.scheduler.utils import smallest_signed_angle\n'), ((17693, 17735), 'numpy.vstack', 'np.vstack', (['[delta_az_short, delta_az_long]'], {}), '([delta_az_short, delta_az_long])\n', (17702, 17735), True, 'import numpy as np\n'), ((18292, 18333), 'numpy.maximum', 'np.maximum', (['telAltSlewTime', 'telAzSlewTime'], {}), '(telAltSlewTime, telAzSlewTime)\n', (18302, 18333), True, 'import numpy as np\n'), ((18626, 18650), 'numpy.where', 'np.where', (['(totTelTime > 0)'], {}), '(totTelTime > 0)\n', (18634, 18650), True, 'import numpy as np\n'), ((18686, 18744), 'numpy.maximum', 'np.maximum', (['(0)', '(self.mount_settletime - olTime[settleAndOL])'], {}), '(0, self.mount_settletime - olTime[settleAndOL])\n', (18696, 18744), True, 'import numpy as np\n'), ((21844, 21878), 'numpy.maximum', 'np.maximum', (['totTelTime', 'totDomTime'], {}), '(totTelTime, totDomTime)\n', (21854, 21878), True, 'import numpy as np\n'), ((21952, 21995), 'numpy.where', 'np.where', (['(filtername != self.current_filter)'], {}), '(filtername != self.current_filter)\n', (21960, 21995), True, 'import numpy as np\n'), ((22029, 22087), 'numpy.maximum', 'np.maximum', (['slewTime[filterChange]', 'self.filter_changetime'], {}), '(slewTime[filterChange], self.filter_changetime)\n', (22039, 22087), True, 'import numpy as np\n'), ((22338, 22368), 'numpy.where', 'np.where', (['(deltaAlt >= cl_limit)'], {}), '(deltaAlt >= cl_limit)\n', (22346, 22368), True, 'import numpy as np\n'), ((2004, 2016), 'rubin_sim.utils.Site', 'Site', (['"""LSST"""'], {}), "('LSST')\n", (2008, 2016), False, 'from rubin_sim.utils import Site, _approx_altAz2RaDec, _approx_altaz2pa, _approx_RaDec2AltAz\n'), ((2053, 2087), 'numpy.radians', 'np.radians', (['self.location.latitude'], {}), '(self.location.latitude)\n', (2063, 2087), True, 'import numpy as np\n'), ((2124, 2159), 'numpy.radians', 'np.radians', (['self.location.longitude'], {}), '(self.location.longitude)\n', (2134, 2159), True, 'import numpy as np\n'), ((8970, 8985), 'numpy.radians', 'np.radians', (['(1.0)'], {}), '(1.0)\n', (8980, 8985), True, 'import numpy as np\n'), ((15549, 15571), 'numpy.isfinite', 'np.isfinite', (['rotTelPos'], {}), '(rotTelPos)\n', (15560, 15571), True, 'import numpy as np\n'), ((15843, 15899), 'rubin_sim.utils._approx_altaz2pa', '_approx_altaz2pa', (['alt_rad', 'az_rad', 'self.location.lat_rad'], {}), '(alt_rad, az_rad, self.location.lat_rad)\n', (15859, 15899), False, 'from rubin_sim.utils import Site, _approx_altAz2RaDec, _approx_altaz2pa, _approx_RaDec2AltAz\n'), ((16698, 16726), 'numpy.where', 'np.where', (['(delta_az_short < 0)'], {}), '(delta_az_short < 0)\n', (16706, 16726), True, 'import numpy as np\n'), ((16806, 16834), 
'numpy.where', 'np.where', (['(delta_az_short < 0)'], {}), '(delta_az_short < 0)\n', (16814, 16834), True, 'import numpy as np\n'), ((17067, 17177), 'numpy.where', 'np.where', (['((cummulative_az_short < self.telaz_minpos_rad) | (cummulative_az_short >\n self.telaz_maxpos_rad))'], {}), '((cummulative_az_short < self.telaz_minpos_rad) | (\n cummulative_az_short > self.telaz_maxpos_rad))\n', (17075, 17177), True, 'import numpy as np\n'), ((17382, 17490), 'numpy.where', 'np.where', (['((cummulative_az_long < self.telaz_minpos_rad) | (cummulative_az_long >\n self.telaz_maxpos_rad))'], {}), '((cummulative_az_long < self.telaz_minpos_rad) | (\n cummulative_az_long > self.telaz_maxpos_rad))\n', (17390, 17490), True, 'import numpy as np\n'), ((17761, 17779), 'numpy.abs', 'np.abs', (['stacked_az'], {}), '(stacked_az)\n', (17767, 17779), True, 'import numpy as np\n'), ((18162, 18180), 'numpy.abs', 'np.abs', (['deltaAztel'], {}), '(deltaAztel)\n', (18168, 18180), True, 'import numpy as np\n'), ((18846, 18883), 'numpy.maximum', 'np.maximum', (['self.readtime', 'totTelTime'], {}), '(self.readtime, totTelTime)\n', (18856, 18883), True, 'import numpy as np\n'), ((19082, 19128), 'rubin_sim.scheduler.utils.smallest_signed_angle', 'smallest_signed_angle', (['starting_az_rad', 'az_rad'], {}), '(starting_az_rad, az_rad)\n', (19103, 19128), False, 'from rubin_sim.scheduler.utils import smallest_signed_angle\n'), ((19306, 19367), 'numpy.where', 'np.where', (['(deltaAlt ** 2 + deltaAz ** 2 < self.camera_fov ** 2)'], {}), '(deltaAlt ** 2 + deltaAz ** 2 < self.camera_fov ** 2)\n', (19314, 19367), True, 'import numpy as np\n'), ((20531, 20572), 'numpy.maximum', 'np.maximum', (['domAltSlewTime', 'domAzSlewTime'], {}), '(domAltSlewTime, domAzSlewTime)\n', (20541, 20572), True, 'import numpy as np\n'), ((20847, 20888), 'numpy.maximum', 'np.maximum', (['domAltSlewTime', 'domAzSlewTime'], {}), '(domAltSlewTime, domAzSlewTime)\n', (20857, 20888), True, 'import numpy as np\n'), ((20915, 20951), 'numpy.minimum', 'np.minimum', (['totDomTime1', 'totDomTime2'], {}), '(totDomTime1, totDomTime2)\n', (20925, 20951), True, 'import numpy as np\n'), ((21551, 21636), 'numpy.where', 'np.where', (['(domAzSlewTime > 0)', '(domAzSlewTime + self.domaz_settletime)', 'domAzSlewTime'], {}), '(domAzSlewTime > 0, domAzSlewTime + self.domaz_settletime,\n domAzSlewTime)\n', (21559, 21636), True, 'import numpy as np\n'), ((21732, 21773), 'numpy.maximum', 'np.maximum', (['domAltSlewTime', 'domAzSlewTime'], {}), '(domAltSlewTime, domAzSlewTime)\n', (21742, 21773), True, 'import numpy as np\n'), ((22494, 22580), 'numpy.where', 'np.where', (['((alt_rad > self.telalt_maxpos_rad) | (alt_rad < self.telalt_minpos_rad))'], {}), '((alt_rad > self.telalt_maxpos_rad) | (alt_rad < self.\n telalt_minpos_rad))\n', (22502, 22580), True, 'import numpy as np\n'), ((24149, 24183), 'numpy.maximum', 'np.maximum', (['slewTime', 'rotator_time'], {}), '(slewTime, rotator_time)\n', (24159, 24183), True, 'import numpy as np\n'), ((11368, 11393), 'numpy.sqrt', 'np.sqrt', (['(distance / accel)'], {}), '(distance / accel)\n', (11375, 11393), True, 'import numpy as np\n'), ((15966, 16058), 'rubin_sim.utils._approx_altAz2RaDec', '_approx_altAz2RaDec', (['alt_rad', 'az_rad', 'self.location.lat_rad', 'self.location.lon_rad', 'mjd'], {}), '(alt_rad, az_rad, self.location.lat_rad, self.location.\n lon_rad, mjd)\n', (15985, 16058), False, 'from rubin_sim.utils import Site, _approx_altAz2RaDec, _approx_altaz2pa, _approx_RaDec2AltAz\n'), ((23101, 23128), 'numpy.where', 'np.where', 
(['(rotTelPos > np.pi)'], {}), '(rotTelPos > np.pi)\n', (23109, 23128), True, 'import numpy as np\n'), ((23964, 24015), 'rubin_sim.scheduler.utils.smallest_signed_angle', 'smallest_signed_angle', (['current_rotTelPos', 'rotTelPos'], {}), '(current_rotTelPos, rotTelPos)\n', (23985, 24015), False, 'from rubin_sim.scheduler.utils import smallest_signed_angle\n'), ((17841, 17869), 'numpy.expand_dims', 'np.expand_dims', (['indx'], {'axis': '(0)'}), '(indx, axis=0)\n', (17855, 17869), True, 'import numpy as np\n')] |
import pickle
import numpy as np
import tensorflow as tf
from model import classifier
# Churn Predictor
class Predictor():
def __init__(self):
self.device = "cuda" if tf.test.is_gpu_available() else "cpu"
# load model weights
self.predictor = classifier
self.predictor.load_weights("models/model")
# load scaler
with open('scalers/scaler.pickle', 'rb') as handle:
self.scaler = pickle.load(handle)
# load encoders
with open('encoders/encoder1.pickle', 'rb') as handle:
self.encoder1 = pickle.load(handle)
with open('encoders/encoder2.pickle', 'rb') as handle:
self.encoder2 = pickle.load(handle)
def predict(self, input_data):
# classes
classes = {True : "Customer will churn! :heavy_multiplication_x:", False: "Customer will not churn! :heavy_check_mark:"}
# image name
image_names = {True: "churn", False: "not_churn"}
# encode data
input_data[0][1] = self.encoder1.transform([input_data[0][1]])[0]
input_data[0][2] = self.encoder2.transform([input_data[0][2]])[0]
# scale data
transformed_data = self.scaler.transform(input_data)
# predict
pred = classifier.predict(transformed_data) > 0.5
return classes[pred[0][0]], image_names[pred[0][0]]
| [
"model.classifier.predict",
"tensorflow.test.is_gpu_available",
"pickle.load"
] | [((176, 202), 'tensorflow.test.is_gpu_available', 'tf.test.is_gpu_available', ([], {}), '()\n', (200, 202), True, 'import tensorflow as tf\n'), ((418, 437), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (429, 437), False, 'import pickle\n'), ((542, 561), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (553, 561), False, 'import pickle\n'), ((646, 665), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (657, 665), False, 'import pickle\n'), ((1167, 1203), 'model.classifier.predict', 'classifier.predict', (['transformed_data'], {}), '(transformed_data)\n', (1185, 1203), False, 'from model import classifier\n')] |
import numpy as np
def merge_quadrangle_n9(polys, thres=0.3, precision=10000):
from ._C import merge_quadrangle_n9 as nms_impl
if len(polys) == 0:
return np.array([], dtype='float32')
p = polys.copy()
p[:,:8] *= precision
ret = np.array(nms_impl(p, thres), dtype='float32')
ret[:,:8] /= precision
return ret
| [
"numpy.array"
] | [((171, 200), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float32"""'}), "([], dtype='float32')\n", (179, 200), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright (c) 2018 by <NAME> <<EMAIL>>
# Licensed under the MIT license. See LICENSE for details.
from lxml import etree
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert CineCanvas DCP subtitle XML to SRT.')
parser.add_argument("infile", help="CineCanvas XML file to read")
args = parser.parse_args()
with open(args.infile, 'rb') as myfile:
xml_bin = myfile.read()
root = etree.fromstring(xml_bin)
for e in root.findall('.//Subtitle'):
print( e.get('SpotNumber'))
time_in, time_out = e.get('TimeIn'), e.get('TimeOut')
in_parts = time_in.split(':')
out_parts = time_out.split(':')
assert(len(in_parts) == 4)
assert(len(out_parts) == 4)
#print(time_in, ' --> ' , time_out)
in_parts[-1] = '%03d' % (int(in_parts[3]) * 4) # convert 4 msec CineCanvas "ticks" to milliseconds
out_parts[-1] = '%03d' % (int(out_parts[3]) * 4)
print( ':'.join(in_parts[:3]) + ',' + in_parts[3] + ' --> ' + ':'.join(out_parts[:3]) + ',' + out_parts[3] )
for t in e.findall('.//Text'):
if t.text is not None:
print(t.text, end='')
for f in t.findall('.//Font'):
if f.text is not None:
if f.get('Italic') == 'yes':
print( '<i>' + f.text + '</i>', end='')
else:
print(f.text, end='')
print('') # linefeed
print('')
| [
"lxml.etree.fromstring",
"argparse.ArgumentParser"
] | [((206, 293), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert CineCanvas DCP subtitle XML to SRT."""'}), "(description=\n 'Convert CineCanvas DCP subtitle XML to SRT.')\n", (229, 293), False, 'import argparse\n'), ((482, 507), 'lxml.etree.fromstring', 'etree.fromstring', (['xml_bin'], {}), '(xml_bin)\n', (498, 507), False, 'from lxml import etree\n')] |
import sys
import warnings
EXIT_CODE_SUCCESS = 0
EXIT_CODE_ALGORITHM_ERROR = 1
EXIT_CODE_CUSTOMER_ERROR = 2
EXIT_CODE_PLATFORM_ERROR = 3
def convert_to_algorithm_error(exception):
"""Converts the most recent exception to an AlgorithmError if not already
a BaseSdkError.
Returns:
A BaseSdkError that represents the reason the algorithm failed.
"""
if isinstance(exception, BaseSdkError):
return exception
elif "(Platform Error)" in str(exception):
return PlatformError(
"An unexpected error has occurred. Please try again. If the problem persists, contact AWS support.",
caused_by=exception)
with warnings.catch_warnings():
warnings.simplefilter("ignore") # Suppress deprecation warning
message = getattr(exception, 'message', str(exception))
return AlgorithmError(message, exception)
def convert_to_customer_data_errors(exception, channel, content_type):
"""Convert exception from data iterators to customer errors.
If exception is a BaseSdkError or not an input error, return the value of exception.
:param exception: (Exception) exception instance
:param channel: (str) data channel name
:param content_type: (str) content type or None
:return: (Exception) an instance of CustomerError or the value of exception parameter
"""
if isinstance(exception, BaseSdkError):
return exception
exception_text = str(exception)
is_nan_error = "(Input Error) (NaN)" in exception_text
is_inf_error = "(Input Error) (Inf)" in exception_text
is_input_error = "(Input Error)" in exception_text
if is_nan_error:
return CustomerError(
"Unable to read data channel '{}'. Found missing (NaN) values. "
"Please remove any missing (NaN) values in the input data.".format(channel), caused_by=exception)
if is_inf_error:
return CustomerError(
"Unable to read data channel '{}'. Found infinite floating point values. "
"Please remove any infinite floating point values in the input data.".format(channel),
caused_by=exception)
if is_input_error and content_type:
return CustomerError(
"Unable to read data channel '{}'. Requested content-type is '{}'. "
"Please verify the data matches the requested content-type.".format(channel, content_type),
caused_by=exception)
if is_input_error:
return CustomerError(
"Unable to read data channel '{}'. "
"Please verify the correct data channel configuration is provided.".format(channel),
caused_by=exception)
return exception
if sys.version_info < (3, 0):
# `raise E, V, T` is a syntax error in Python 3, therefore using `exec`
exec("""
def raise_with_traceback(exception, traceback=None):
if traceback is None:
traceback = sys.exc_info()[2]
raise exception, None, traceback
""")
else:
def raise_with_traceback(exception, traceback=None):
if traceback is None:
traceback = sys.exc_info()[2]
raise exception.with_traceback(traceback)
class BaseSdkError(Exception):
"""Abstract base for all errors that may cause an algorithm to exit/terminate
unsuccessfully. All direct sub-classes should be kept/maintained in this file.
These errors are grouped into three categories:
1. AlgorithmError: an unexpected or unknown failure that cannot be
avoided by the customer and is due to a bug in
the algorithm.
2. CustomerError: a failure which can be prevented/avoided by the
customer (e.g. change mini_batch_size).
3. PlatformError: a failure due to an environmental requirement not
being met (e.g. if the /opt/ml/training directory
is missing).
All other types of errors/exceptions should be converted by default to an
AlgorithmError.
These classes are also responsible for providing the exit behaviour/code,
the failure reason to output for the training service, and the log messages
that should be printed upon termination.
Each type of error may have multiple subclasses that inherit from both
that error type (e.g. CustomerError) and a standard exception type
(e.g. ValueError) to make integration easier and allow these errors to
be caught/handled with standard handlers (instead of having SDK-specific
error code being distributed throughout the codebase). For example, the
following works:
try:
...
if a > 5:
raise CustomerValueError('a should be less than 5')
...
except ValueError:
print('CustomerValueError will get handled here!')
Args: see `Attributes` below.
Attributes:
message (string): Description of why this exception was raised.
caused_by (exception): The underlying exception that caused this
exception to be raised. This should be a non-BaseSdkError.
exit_code (int): The exit code that should be used if this exception
            makes its way to the top-level handler.
failure_prefix (string): Prefix for the training job failure status if
this exception is handled at the top-level. This will be seen by the
user in the Console UI.
"""
def __init__(self,
message=None,
caused_by=None,
exit_code=127,
failure_prefix='Algorithm Error'):
formatted_message = BaseSdkError._format_exception_message(message, caused_by)
super(BaseSdkError, self).__init__(formatted_message)
self.message = formatted_message
self.caused_by = caused_by
self.failure_prefix = failure_prefix
self.exit_code = exit_code
@staticmethod
def _format_exception_message(message, caused_by):
"""Generates the exception message.
If a message has been explicitly passed then we use that as the exception
message. If we also know the underlying exception type we prepend that
to the name.
If there is no message but we have an underlying exception then we use
that exceptions message and prepend the type of the exception.
"""
if message:
formatted_message = message
elif caused_by:
with warnings.catch_warnings():
warnings.simplefilter("ignore") # Suppress deprecation warning
formatted_message = getattr(caused_by, 'message', str(caused_by))
else:
formatted_message = "unknown error occurred"
if caused_by:
formatted_message += " (caused by {})".format(caused_by.__class__.__name__)
return formatted_message
def get_error_summary(self):
"""Return a short error summary"""
return "{}: {}".format(self.failure_prefix, self.message)
def get_error_detail(self):
"""Return error details"""
return "Caused by: {}".format(self.caused_by) if self.caused_by else ""
def _format_failure_message(self):
message = self.get_error_summary()
error_detail = self.get_error_detail()
if error_detail:
message += "\n\n{}".format(error_detail)
return message
def failure_message(self):
warnings.warn("deprecated", DeprecationWarning)
return self._format_failure_message()
def public_failure_message(self):
"""Message to print to stdout."""
return self._format_failure_message()
def private_failure_message(self):
"""Message to print to the trusted error channel."""
return self._format_failure_message()
class AlgorithmError(BaseSdkError):
"""Exception used to indicate a problem that occurred with the algorithm."""
def __init__(self, message=None, caused_by=None):
super(AlgorithmError, self).__init__(message,
caused_by,
failure_prefix='Algorithm Error',
exit_code=EXIT_CODE_ALGORITHM_ERROR)
class CustomerError(BaseSdkError):
"""Exception used to indicate a problem caused by mis-configuration or other customer input."""
def __init__(self, message=None, caused_by=None):
super(CustomerError, self).__init__(message,
caused_by,
failure_prefix='Customer Error',
exit_code=EXIT_CODE_CUSTOMER_ERROR)
class PlatformError(BaseSdkError):
"""Exception used to indicate a problem caused by the underlying platform (e.g. network time-outs)."""
def __init__(self, message=None, caused_by=None):
super(PlatformError, self).__init__(message,
caused_by,
failure_prefix='Platform Error',
exit_code=EXIT_CODE_PLATFORM_ERROR)
class CustomerValueError(CustomerError, ValueError):
"""Exception used to indicate a problem caused by mis-configuration or other customer input."""
def __init__(self, message=None, caused_by=None):
super(CustomerValueError, self).__init__(message, caused_by)
class CustomerKeyError(CustomerError, KeyError):
"""Exception used to indicate a problem caused by mis-configuration or other customer input."""
def __init__(self, message=None, caused_by=None):
super(CustomerKeyError, self).__init__(message, caused_by)
class PlatformValueError(PlatformError, ValueError):
"""Exception used to indicate a problem caused by the underlying platform (e.g. network time-outs)."""
def __init__(self, message=None, caused_by=None):
super(PlatformValueError, self).__init__(message, caused_by)
class PlatformKeyError(PlatformError, KeyError):
"""Exception used to indicate a problem caused by the underlying platform (e.g. network time-outs)."""
def __init__(self, message=None, caused_by=None):
super(PlatformKeyError, self).__init__(message, caused_by)
| [
"warnings.simplefilter",
"sys.exc_info",
"warnings.warn",
"warnings.catch_warnings"
] | [((678, 703), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (701, 703), False, 'import warnings\n'), ((713, 744), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (734, 744), False, 'import warnings\n'), ((7459, 7506), 'warnings.warn', 'warnings.warn', (['"""deprecated"""', 'DeprecationWarning'], {}), "('deprecated', DeprecationWarning)\n", (7472, 7506), False, 'import warnings\n'), ((3094, 3108), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3106, 3108), False, 'import sys\n'), ((6490, 6515), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (6513, 6515), False, 'import warnings\n'), ((6533, 6564), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (6554, 6564), False, 'import warnings\n')] |
import pandas as pd
from lc_classifier.features import ZTFLightcurvePreprocessor
# Loading data
detections = pd.read_csv(
'detections.csv',
index_col='oid',
na_values=['Infinity'])
non_detections = pd.read_csv(
'non_detections.csv',
index_col='oid',
na_values=['Infinity'])
labels = pd.read_csv(
'dfcrossmatches_prioritized_v5.1.csv',
index_col='oid',
na_values=['Infinity']
)
# Adapt classes for the paper
unused_classes = ['TDE', 'ZZ']
rename_class_dictionary = {
'EA': 'EB',
'EB/EW': 'EB',
'RSCVn': 'Periodic-Other',
'SNIIb': 'SNII',
'SNIIn': 'SNII'
}
labels = labels[~labels.classALeRCE.isin(unused_classes)].copy()
labels['classALeRCE'] = labels['classALeRCE'].map(
rename_class_dictionary).fillna(labels['classALeRCE'])
# Intersecting labels and detections
valid_oids = detections.index.unique().intersection(
labels.index.unique())
labeled_detections = detections.loc[valid_oids]
labels = labels.loc[valid_oids].copy()
valid_oids = valid_oids.intersection(non_detections.index.unique())
labeled_non_detections = non_detections.loc[valid_oids]
# ZTF preprocessing
preprocessor_ztf = ZTFLightcurvePreprocessor()
labeled_detections = preprocessor_ztf.preprocess(labeled_detections)
# Save data
labeled_detections.to_pickle('dataset_detections.pkl')
labeled_non_detections.to_pickle('dataset_non_detections.pkl')
labels.to_pickle('dataset_labels.pkl')
| [
"lc_classifier.features.ZTFLightcurvePreprocessor",
"pandas.read_csv"
] | [((112, 182), 'pandas.read_csv', 'pd.read_csv', (['"""detections.csv"""'], {'index_col': '"""oid"""', 'na_values': "['Infinity']"}), "('detections.csv', index_col='oid', na_values=['Infinity'])\n", (123, 182), True, 'import pandas as pd\n'), ((214, 288), 'pandas.read_csv', 'pd.read_csv', (['"""non_detections.csv"""'], {'index_col': '"""oid"""', 'na_values': "['Infinity']"}), "('non_detections.csv', index_col='oid', na_values=['Infinity'])\n", (225, 288), True, 'import pandas as pd\n'), ((312, 407), 'pandas.read_csv', 'pd.read_csv', (['"""dfcrossmatches_prioritized_v5.1.csv"""'], {'index_col': '"""oid"""', 'na_values': "['Infinity']"}), "('dfcrossmatches_prioritized_v5.1.csv', index_col='oid',\n na_values=['Infinity'])\n", (323, 407), True, 'import pandas as pd\n'), ((1164, 1191), 'lc_classifier.features.ZTFLightcurvePreprocessor', 'ZTFLightcurvePreprocessor', ([], {}), '()\n', (1189, 1191), False, 'from lc_classifier.features import ZTFLightcurvePreprocessor\n')] |
'''adampapercifar for CIFAR10. FC layers are removed. Paddings are adjusted.
Modified by <NAME> and <NAME> (Winter 2019).
Without BN, the start learning rate should be 0.01
(c) <NAME>
'''
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['mnistmlp']
class MnistMLP(nn.Module):
def __init__(self, num_classes=10, dropout=False):
super(MnistMLP, self).__init__()
self.dropout = dropout
if self.dropout:
print('Using Dropout 0.5 for Model.')
self.fc1 = nn.Linear(784, 1000)
self.fc2 = nn.Linear(1000, 1000)
self.fc3 = nn.Linear(1000, num_classes)
def forward(self, x):
x = x.view(-1, 28 * 28)
x = F.relu(self.fc1(x))
if self.dropout: # just here
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def mnistmlp(**kwargs):
model = MnistMLP(**kwargs)
return model
| [
"torch.nn.functional.dropout",
"torch.nn.Linear"
] | [((538, 558), 'torch.nn.Linear', 'nn.Linear', (['(784)', '(1000)'], {}), '(784, 1000)\n', (547, 558), True, 'import torch.nn as nn\n'), ((578, 599), 'torch.nn.Linear', 'nn.Linear', (['(1000)', '(1000)'], {}), '(1000, 1000)\n', (587, 599), True, 'import torch.nn as nn\n'), ((619, 647), 'torch.nn.Linear', 'nn.Linear', (['(1000)', 'num_classes'], {}), '(1000, num_classes)\n', (628, 647), True, 'import torch.nn as nn\n'), ((818, 861), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.5)', 'training': 'self.training'}), '(x, p=0.5, training=self.training)\n', (827, 861), True, 'import torch.nn.functional as F\n')] |
from django.urls import reverse
from rest_framework import status
from authors.apps.articles.tests import base_class
from ..test_data import test_article_data
class TestArticleAPIEndpoints(base_class.BaseTest):
def setUp(self):
super().setUp()
self.user, self.article = self.create_article_and_authenticate_test_user()
def test_user_can_undo_a_like(self):
self.assertEqual(self.article.is_liked_by(self.user), False)
response = self.client.post(self.like_article_url(self.article.slug))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.article.is_liked_by(self.user), True)
response = self.client.delete(self.like_article_url(self.article.slug))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.article.is_liked_by(self.user), False)
def test_user_can_undo_a_dislike(self):
self.assertEqual(self.article.is_liked_by(self.user), False)
response = self.client.post(
self.dislike_article_url(self.article.slug))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.article.is_disliked_by(self.user), True)
response = self.client.delete(
self.dislike_article_url(self.article.slug))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.article.is_disliked_by(self.user), False)
def test_like_then_dislike_undoes_the_like(self):
self.article.liked_by.add(self.user)
self.assertEqual(self.article.is_liked_by(self.user), True)
self.assertEqual(self.article.is_disliked_by(self.user), False)
response = self.client.post(
self.dislike_article_url(self.article.slug))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.article.is_liked_by(self.user), False)
self.assertEqual(self.article.is_disliked_by(self.user), True)
def test_dislike_then_like_undoes_the_dislike(self):
self.article.disliked_by.add(self.user)
self.assertEqual(self.article.is_disliked_by(self.user), True)
self.assertEqual(self.article.is_liked_by(self.user), False)
response = self.client.post(self.like_article_url(self.article.slug))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.article.is_disliked_by(self.user), False)
self.assertEqual(self.article.is_liked_by(self.user), True)
def test_multiple_users_can_like_an_article(self):
self.assertEqual(self.article.like_count, 0)
# first like
self.client.post(self.like_article_url(self.article.slug))
self.assertEqual(self.article.like_count, 1)
user2 = self.create_article_and_authenticate_test_user_2()
self.client.force_authenticate(user2)
# second like
self.client.post(self.like_article_url(self.article.slug))
self.assertEqual(self.article.like_count, 2)
def test_user_can_the_number_of_people_that_liked_an_article(self):
"""a user can get an article's like count"""
# first like
self.client.post(self.like_article_url(self.article.slug))
user2 = self.create_article_and_authenticate_test_user_2()
self.client.force_authenticate(user2)
# second like
self.client.post(self.like_article_url(self.article.slug))
response = self.client.get(self.article_url(self.article.slug))
self.assertEqual(response.data['like_count'], 2)
def test_if_tags_are_in_response_object(self):
user2 = self.create_article_and_authenticate_test_user_2()
self.client.force_authenticate(user2)
response = self.client.post(self.articles_url,
data=test_article_data.
valid_article_data,
format='json')
self.assertIn('tag_list', response.data)
def test_get_all_tags(self):
user2 = self.create_article_and_authenticate_test_user_2()
self.client.force_authenticate(user2)
self.client.post(self.articles_url,
data=test_article_data.
valid_article_data_with_tags,
format='json')
response = self.client.get(reverse('articles:tags'))
# print(response.data)
self.assertEqual(2, len(response.data['tags']))
| [
"django.urls.reverse"
] | [((4393, 4417), 'django.urls.reverse', 'reverse', (['"""articles:tags"""'], {}), "('articles:tags')\n", (4400, 4417), False, 'from django.urls import reverse\n')] |
from compas.artists import Artist
class FakeArtist(Artist):
def draw(self):
pass
class FakeSubArtist(Artist):
def draw(self):
pass
class FakeItem(object):
pass
class FakeSubItem(FakeItem):
pass
def test_get_artist_cls_with_orderly_registration():
Artist.register(FakeItem, FakeArtist, context='fake')
Artist.register(FakeSubItem, FakeSubArtist, context='fake')
item = FakeItem()
artist = Artist(item, context='fake')
assert isinstance(artist, FakeArtist)
item = FakeSubItem()
artist = Artist(item, context='fake')
assert isinstance(artist, FakeSubArtist)
def test_get_artist_cls_with_out_of_order_registration():
Artist.register(FakeSubItem, FakeSubArtist, context='fake')
Artist.register(FakeItem, FakeArtist, context='fake')
item = FakeItem()
artist = Artist(item, context='fake')
assert isinstance(artist, FakeArtist)
item = FakeSubItem()
artist = Artist(item, context='fake')
assert isinstance(artist, FakeSubArtist)
| [
"compas.artists.Artist",
"compas.artists.Artist.register"
] | [((293, 346), 'compas.artists.Artist.register', 'Artist.register', (['FakeItem', 'FakeArtist'], {'context': '"""fake"""'}), "(FakeItem, FakeArtist, context='fake')\n", (308, 346), False, 'from compas.artists import Artist\n'), ((351, 410), 'compas.artists.Artist.register', 'Artist.register', (['FakeSubItem', 'FakeSubArtist'], {'context': '"""fake"""'}), "(FakeSubItem, FakeSubArtist, context='fake')\n", (366, 410), False, 'from compas.artists import Artist\n'), ((446, 474), 'compas.artists.Artist', 'Artist', (['item'], {'context': '"""fake"""'}), "(item, context='fake')\n", (452, 474), False, 'from compas.artists import Artist\n'), ((556, 584), 'compas.artists.Artist', 'Artist', (['item'], {'context': '"""fake"""'}), "(item, context='fake')\n", (562, 584), False, 'from compas.artists import Artist\n'), ((694, 753), 'compas.artists.Artist.register', 'Artist.register', (['FakeSubItem', 'FakeSubArtist'], {'context': '"""fake"""'}), "(FakeSubItem, FakeSubArtist, context='fake')\n", (709, 753), False, 'from compas.artists import Artist\n'), ((758, 811), 'compas.artists.Artist.register', 'Artist.register', (['FakeItem', 'FakeArtist'], {'context': '"""fake"""'}), "(FakeItem, FakeArtist, context='fake')\n", (773, 811), False, 'from compas.artists import Artist\n'), ((847, 875), 'compas.artists.Artist', 'Artist', (['item'], {'context': '"""fake"""'}), "(item, context='fake')\n", (853, 875), False, 'from compas.artists import Artist\n'), ((957, 985), 'compas.artists.Artist', 'Artist', (['item'], {'context': '"""fake"""'}), "(item, context='fake')\n", (963, 985), False, 'from compas.artists import Artist\n')] |
from fastapi import APIRouter
router = APIRouter()
@router.get("/")
def working():
return {"music"}
| [
"fastapi.APIRouter"
] | [((40, 51), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (49, 51), False, 'from fastapi import APIRouter\n')] |
import logging
import re
from copy import copy
from pathlib import PosixPath
from typing import Dict, List, Tuple
import pandas as pd
from pandas import DataFrame, Series
from omegaconf import DictConfig
from .mortality_file_extractor import MortalityFileExtractor
class MortalityXLSXExtractor(MortalityFileExtractor):
"""
Extracts mortality facts from Statistics Poland XLS files
"""
def __init__(self, file_path: PosixPath, cfg: DictConfig):
self.file_path = file_path
self.cfg = cfg
self.mortality_facts = DataFrame()
self.log = logging.getLogger(__name__)
@property
def reported_year(self) -> int:
"""
Returns the reported actuals year
"""
return int(self.file_path.stem.split('_')[-1])
@property
def genders(self) -> Dict[str, int]:
return self.cfg.raw_data.genders
@property
def fact_columns(self) -> List[str]:
return self.cfg.raw_data.fact_columns
@property
def regions(self) -> Dict[str, int]:
return self.cfg.raw_data.regions
@property
def age_groups(self) -> Dict[str, int]:
return self.cfg.raw_data.age_groups
@property
def fact_year(self) -> int:
return int(self.file_path.stem.split('_')[-1])
def extract_actuals(self) -> None:
"""
Extracts mortality data facts
"""
for gender_data in self.genders.items():
self._extract_gender_sheet(gender_data)
if not self.mortality_facts.empty:
self.log.info(f'Year: {self.fact_year} - {len(self.mortality_facts)} mortality facts extracted ({self.mortality_facts.deceased_actuals.sum():.0f} of deaths in total)')
return self.mortality_facts
def _extract_gender_sheet(self, gender_data: Tuple[str, int]) -> None:
"""
        Extracts mortality data facts from a given gender sheet
"""
gender_sheet = self._get_gender_sheet(gender_data)
gender_sheet_facts = self._transform_gender_sheet_into_facts_table(gender_sheet)
self.mortality_facts = pd.concat((self.mortality_facts, gender_sheet_facts))
def _get_gender_sheet(self, gender_data: Tuple[str, int]) -> DataFrame:
"""
Extracts gender sheet
"""
gender_sheet_name, gender_id = gender_data
raw_annual_gender_mortality_facts = self._read_raw_xlsx_sheet(gender_sheet_name)
gender_mortality_facts = self._map_gender(raw_annual_gender_mortality_facts, gender_id)
region_gender_mortality_facts = self._map_regions(gender_mortality_facts)
gender_sheet = self._map_age_groups(region_gender_mortality_facts)
return gender_sheet
def _read_raw_xlsx_sheet(self, gender_sheet_name: str) -> DataFrame:
"""
Reads a raw xlsx sheet
"""
raw_annual_mortality_facts = pd.read_excel(self.file_path,
engine='openpyxl',
header=6,
sheet_name=gender_sheet_name)
raw_annual_mortality_facts = raw_annual_mortality_facts[1:] # first row is blank, skipping row in pandas doesn't work correctly
return raw_annual_mortality_facts
def _map_gender(self, raw_annual_gender_mortality_facts: DataFrame, gender_id: int) -> DataFrame:
"""
Maps gender
"""
gender_mortality_facts = raw_annual_gender_mortality_facts.copy()
gender_mortality_facts['gender'] = gender_id
return gender_mortality_facts
def _map_regions(self, raw_annual_gender_mortality_facts: DataFrame) -> DataFrame:
"""
Maps and filters regions
"""
region_column = raw_annual_gender_mortality_facts.columns[1]
raw_annual_gender_mortality_facts.rename(columns={region_column: 'region'}, inplace=True)
region_gender_mortality_facts = raw_annual_gender_mortality_facts[
raw_annual_gender_mortality_facts['region'].isin(self.regions.keys())
].copy()
region_gender_mortality_facts['region'].replace(self.regions, inplace=True)
return region_gender_mortality_facts
def _map_age_groups(self, region_gender_mortality_facts: DataFrame) -> DataFrame:
"""
Maps and filters age groups
"""
age_group_column = region_gender_mortality_facts.columns[0]
region_gender_mortality_facts.rename(columns={age_group_column: 'age_group'}, inplace=True)
gender_sheet_facts = region_gender_mortality_facts[
region_gender_mortality_facts['age_group'].isin(self.age_groups.keys())
].copy()
gender_sheet_facts['age_group'].replace(self.age_groups, inplace=True)
return gender_sheet_facts
def _transform_gender_sheet_into_facts_table(self, gender_sheet: DataFrame) -> DataFrame:
"""
Transforms extracted gender table into facts table
"""
new_rows = []
for _, row in gender_sheet.iterrows():
facts_from_row = self._get_facts_from_row(row)
new_rows.append(facts_from_row)
all_facts = sum(new_rows, [])
gender_sheet_facts = DataFrame(all_facts)
return gender_sheet_facts
def _get_facts_from_row(self, row: Series) -> List[dict]:
"""
Returns all facts from the xls sheet row
"""
fact_base = self._get_fact_base(row)
facts_from_row = []
for column_name in row.index:
if not self.is_date_column(column_name):
continue
fact = self._get_fact(fact_base, column_name, row)
if pd.isnull(fact['deceased_actuals']):
continue
facts_from_row.append(fact)
return facts_from_row
def _get_fact_base(self, row: Series) -> dict:
"""
Returns the fact base for the gender sheet row
"""
fact_base = {'year': self.reported_year}
for column_name in row.index:
if column_name not in self.fact_columns:
continue
fact_base[column_name] = row[column_name]
return fact_base
def _get_fact(self, fact_base: dict, date_column_name: str, row: Series) -> dict:
"""
Gets single fact dictionary
"""
fact = copy(fact_base)
fact['week'] = self._get_week_number(date_column_name)
fact['deceased_actuals'] = row[date_column_name]
return fact
@staticmethod
def _get_week_number(date_column_name: str):
"""
Returns the week number from the column name
"""
return int(date_column_name[1:])
@staticmethod
def is_date_column(column_name: str) -> bool:
"""
Checks if column name refers to a date column
"""
match = re.match('^[T][0-9][0-9]', column_name)
return match is not None
| [
"logging.getLogger",
"pandas.isnull",
"re.match",
"pandas.read_excel",
"pandas.DataFrame",
"copy.copy",
"pandas.concat"
] | [((555, 566), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (564, 566), False, 'from pandas import DataFrame, Series\n'), ((586, 613), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (603, 613), False, 'import logging\n'), ((2107, 2160), 'pandas.concat', 'pd.concat', (['(self.mortality_facts, gender_sheet_facts)'], {}), '((self.mortality_facts, gender_sheet_facts))\n', (2116, 2160), True, 'import pandas as pd\n'), ((2879, 2972), 'pandas.read_excel', 'pd.read_excel', (['self.file_path'], {'engine': '"""openpyxl"""', 'header': '(6)', 'sheet_name': 'gender_sheet_name'}), "(self.file_path, engine='openpyxl', header=6, sheet_name=\n gender_sheet_name)\n", (2892, 2972), True, 'import pandas as pd\n'), ((5153, 5173), 'pandas.DataFrame', 'DataFrame', (['all_facts'], {}), '(all_facts)\n', (5162, 5173), False, 'from pandas import DataFrame, Series\n'), ((6280, 6295), 'copy.copy', 'copy', (['fact_base'], {}), '(fact_base)\n', (6284, 6295), False, 'from copy import copy\n'), ((6789, 6828), 're.match', 're.match', (['"""^[T][0-9][0-9]"""', 'column_name'], {}), "('^[T][0-9][0-9]', column_name)\n", (6797, 6828), False, 'import re\n'), ((5611, 5646), 'pandas.isnull', 'pd.isnull', (["fact['deceased_actuals']"], {}), "(fact['deceased_actuals'])\n", (5620, 5646), True, 'import pandas as pd\n')] |
#!/bin/python3
from flocking.server import server
server.launch()
| [
"flocking.server.server.launch"
] | [((52, 67), 'flocking.server.server.launch', 'server.launch', ([], {}), '()\n', (65, 67), False, 'from flocking.server import server\n')] |
from django.urls import path
from . import views
app_name='product'
urlpatterns = [
path('home/', views.home, name='home'),
path('add/', views.product_add, name='add'),
path('', views.product_all, name='all'),
path('<slug:product_slug>', views.product_detail, name='detail'),
path('<slug:product_slug>/update', views.product_update, name='update'),
path('<slug:product_slug>/delete', views.product_delete, name='delete'),
path('category/<slug:category_slug>', views.category_filter, name='category_filter'),
]
| [
"django.urls.path"
] | [((91, 129), 'django.urls.path', 'path', (['"""home/"""', 'views.home'], {'name': '"""home"""'}), "('home/', views.home, name='home')\n", (95, 129), False, 'from django.urls import path\n'), ((135, 178), 'django.urls.path', 'path', (['"""add/"""', 'views.product_add'], {'name': '"""add"""'}), "('add/', views.product_add, name='add')\n", (139, 178), False, 'from django.urls import path\n'), ((184, 223), 'django.urls.path', 'path', (['""""""', 'views.product_all'], {'name': '"""all"""'}), "('', views.product_all, name='all')\n", (188, 223), False, 'from django.urls import path\n'), ((229, 293), 'django.urls.path', 'path', (['"""<slug:product_slug>"""', 'views.product_detail'], {'name': '"""detail"""'}), "('<slug:product_slug>', views.product_detail, name='detail')\n", (233, 293), False, 'from django.urls import path\n'), ((299, 370), 'django.urls.path', 'path', (['"""<slug:product_slug>/update"""', 'views.product_update'], {'name': '"""update"""'}), "('<slug:product_slug>/update', views.product_update, name='update')\n", (303, 370), False, 'from django.urls import path\n'), ((376, 447), 'django.urls.path', 'path', (['"""<slug:product_slug>/delete"""', 'views.product_delete'], {'name': '"""delete"""'}), "('<slug:product_slug>/delete', views.product_delete, name='delete')\n", (380, 447), False, 'from django.urls import path\n'), ((453, 542), 'django.urls.path', 'path', (['"""category/<slug:category_slug>"""', 'views.category_filter'], {'name': '"""category_filter"""'}), "('category/<slug:category_slug>', views.category_filter, name=\n 'category_filter')\n", (457, 542), False, 'from django.urls import path\n')] |
import numpy as np
import pandas as pd
from sklearn.svm import SVC
from sklearn import metrics
import pickle
import random
random.seed(12345)
raw = pd.read_csv('./tmp/vectorized_feature_w_ranks_norm.txt')
X = raw.loc[:, 'bi_rank':'vowel_ratio'].as_matrix()
Y = raw.loc[:, 'class'].as_matrix()
domains = raw.ix[:, 'ip'].as_matrix()
from sklearn import linear_model, decomposition, datasets
n_samples, n_features = X.shape
p = list(range(n_samples)) # Shuffle samples
# random initialization
classifier = SVC(kernel='linear', probability=True, random_state=0)
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import auc
precision_list = []
recall_list = []
area_list = []
fpr_list = []
tpr_list = []
roc_auc_list = []
accuracy_list = []
from sklearn.metrics import roc_curve, auc
max_ac = 0
for i in range(10): # 10 fold cross-validation
print('x-validation round %d' % i)
random.seed(i)
p = random.sample(p, int(n_samples / 10))
# random.shuffle(p)
XX, yy = X[p], Y[p]
cut_off = int(len(XX) / 5) * 4
probas_ = classifier.fit(XX[:cut_off], yy[:cut_off]).predict(XX[cut_off:])
# precision, recall, thresholds = precision_recall_curve(yy[cut_off:], probas_)
# fpr, tpr, thresholds = roc_curve(yy[cut_off:], probas_)
# roc_auc = auc(fpr, tpr)
# area = auc(recall, precision)
# precision_list.append(precision)
# recall_list.append(recall)
# area_list.append(area)
# fpr_list.append(fpr)
# tpr_list.append(tpr)
# roc_auc_list.append(roc_auc)
# pred = [int(i > 0.5) for i in probas_]
# accuracy_list.append(accuracy_score(yy[cut_off:], pred, normalize=True))
ac_score = metrics.accuracy_score(yy[cut_off:], probas_)
print(ac_score)
if ac_score > max_ac:
with open('./model/svm_738.pickle', 'wb') as fw:
pickle.dump(classifier, fw)
print("第%d次训练,保存模型" % i)
max_ac = ac_score
| [
"pickle.dump",
"pandas.read_csv",
"random.seed",
"sklearn.metrics.accuracy_score",
"sklearn.svm.SVC"
] | [((124, 142), 'random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (135, 142), False, 'import random\n'), ((150, 206), 'pandas.read_csv', 'pd.read_csv', (['"""./tmp/vectorized_feature_w_ranks_norm.txt"""'], {}), "('./tmp/vectorized_feature_w_ranks_norm.txt')\n", (161, 206), True, 'import pandas as pd\n'), ((514, 568), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""', 'probability': '(True)', 'random_state': '(0)'}), "(kernel='linear', probability=True, random_state=0)\n", (517, 568), False, 'from sklearn.svm import SVC\n'), ((961, 975), 'random.seed', 'random.seed', (['i'], {}), '(i)\n', (972, 975), False, 'import random\n'), ((1725, 1770), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['yy[cut_off:]', 'probas_'], {}), '(yy[cut_off:], probas_)\n', (1747, 1770), False, 'from sklearn import metrics\n'), ((1886, 1913), 'pickle.dump', 'pickle.dump', (['classifier', 'fw'], {}), '(classifier, fw)\n', (1897, 1913), False, 'import pickle\n')] |
# See LICENSE file for full copyright and licensing details.
from odoo import models, fields, api
class SchoolTeacher(models.Model):
''' Defining a Teacher information '''
_name = 'school.teacher'
_description = 'Teacher Information'
employee_id = fields.Many2one('hr.employee', 'Employee ID',
ondelete="cascade",
delegate=True, required=True)
standard_id = fields.Many2one('school.standard',
"Responsibility of Academic Class",
help="Standard for which the teacher\
responsible for.")
stand_id = fields.Many2one('standard.standard', "Course",
related="standard_id.standard_id", store=True)
subject_id = fields.Many2many('subject.subject', 'subject_teacher_rel',
'teacher_id', 'subject_id',
'Course-Subjects')
school_id = fields.Many2one('school.school', "Campus",
related="standard_id.school_id", store=True)
category_ids = fields.Many2many('hr.employee.category',
'employee_category_rel', 'emp_id',
'category_id', 'Tags')
department_id = fields.Many2one('hr.department', 'Department')
is_parent = fields.Boolean('Is Parent')
stu_parent_id = fields.Many2one('school.parent', 'Related Parent')
student_id = fields.Many2many('student.student',
'students_teachers_parent_rel',
'teacher_id', 'student_id',
'Children')
phone_numbers = fields.Char("Phone Number")
@api.onchange('is_parent')
def _onchange_isparent(self):
if self.is_parent:
self.stu_parent_id = False
self.student_id = [(6, 0, [])]
@api.onchange('stu_parent_id')
def _onchangestudent_parent(self):
stud_list = []
if self.stu_parent_id and self.stu_parent_id.student_id:
for student in self.stu_parent_id.student_id:
stud_list.append(student.id)
self.student_id = [(6, 0, stud_list)]
@api.model
def create(self, vals):
teacher_id = super(SchoolTeacher, self).create(vals)
user_obj = self.env['res.users']
user_vals = {'name': teacher_id.name,
'login': teacher_id.work_email,
'email': teacher_id.work_email,
}
ctx_vals = {'teacher_create': True,
'school_id': teacher_id.school_id.company_id.id}
user_id = user_obj.with_context(ctx_vals).create(user_vals)
teacher_id.employee_id.write({'user_id': user_id.id})
if vals.get('is_parent'):
self.parent_crt(teacher_id)
return teacher_id
@api.multi
def parent_crt(self, manager_id):
stu_parent = []
if manager_id.stu_parent_id:
stu_parent = manager_id.stu_parent_id
if not stu_parent:
emp_user = manager_id.employee_id
students = [stu.id for stu in manager_id.student_id]
parent_vals = {'name': manager_id.name,
'email': emp_user.work_email,
'parent_create_mng': 'parent',
'user_ids': [(6, 0, [emp_user.user_id.id])],
'partner_id': emp_user.user_id.partner_id.id,
'student_id': [(6, 0, students)]}
stu_parent = self.env['school.parent'].create(parent_vals)
manager_id.write({'stu_parent_id': stu_parent.id})
user = stu_parent.user_ids
user_rec = user[0]
parent_grp_id = self.env.ref('school.group_school_parent')
groups = parent_grp_id
if user_rec.groups_id:
groups = user_rec.groups_id
groups += parent_grp_id
group_ids = [group.id for group in groups]
user_rec.write({'groups_id': [(6, 0, group_ids)]})
@api.multi
def write(self, vals):
if vals.get('is_parent'):
self.parent_crt(self)
if vals.get('student_id'):
self.stu_parent_id.write({'student_id': vals.get('student_id')})
if not vals.get('is_parent'):
user_rec = self.employee_id.user_id
ir_obj = self.env['ir.model.data']
parent_grp_id = ir_obj.get_object('school', 'group_school_parent')
groups = parent_grp_id
if user_rec.groups_id:
groups = user_rec.groups_id
groups -= parent_grp_id
group_ids = [group.id for group in groups]
user_rec.write({'groups_id': [(6, 0, group_ids)]})
return super(SchoolTeacher, self).write(vals)
@api.onchange('address_id')
def onchange_address_id(self):
self.work_phone = False
self.mobile_phone = False
if self.address_id:
self.work_phone = self.address_id.phone,
self.mobile_phone = self.address_id.mobile
@api.onchange('department_id')
def onchange_department_id(self):
if self.department_id:
self.parent_id = (self.department_id and
self.department_id.manager_id and
self.department_id.manager_id.id) or False
@api.onchange('user_id')
def onchange_user(self):
if self.user_id:
self.name = self.name or self.user_id.name
self.work_email = self.user_id.email
self.image = self.image or self.user_id.image
@api.onchange('school_id')
def onchange_school(self):
self.address_id = False
self.mobile_phone = False
self.work_location = False
self.work_email = False
self.work_phone = False
if self.school_id:
self.address_id = self.school_id.company_id.partner_id.id
self.mobile_phone = self.school_id.company_id.partner_id.mobile
self.work_location = self.school_id.company_id.partner_id.city
self.work_email = self.school_id.company_id.partner_id.email
phone = self.school_id.company_id.partner_id.phone
self.work_phone = phone
self.phone_numbers = phone
phone = self.school_id.company_id.partner_id.phone
| [
"odoo.fields.Many2one",
"odoo.api.onchange",
"odoo.fields.Many2many",
"odoo.fields.Char",
"odoo.fields.Boolean"
] | [((268, 368), 'odoo.fields.Many2one', 'fields.Many2one', (['"""hr.employee"""', '"""Employee ID"""'], {'ondelete': '"""cascade"""', 'delegate': '(True)', 'required': '(True)'}), "('hr.employee', 'Employee ID', ondelete='cascade', delegate=\n True, required=True)\n", (283, 368), False, 'from odoo import models, fields, api\n'), ((450, 624), 'odoo.fields.Many2one', 'fields.Many2one', (['"""school.standard"""', '"""Responsibility of Academic Class"""'], {'help': '"""Standard for which the teacher responsible for."""'}), "('school.standard', 'Responsibility of Academic Class', help\n =\n 'Standard for which the teacher responsible for.'\n )\n", (465, 624), False, 'from odoo import models, fields, api\n'), ((695, 793), 'odoo.fields.Many2one', 'fields.Many2one', (['"""standard.standard"""', '"""Course"""'], {'related': '"""standard_id.standard_id"""', 'store': '(True)'}), "('standard.standard', 'Course', related=\n 'standard_id.standard_id', store=True)\n", (710, 793), False, 'from odoo import models, fields, api\n'), ((837, 946), 'odoo.fields.Many2many', 'fields.Many2many', (['"""subject.subject"""', '"""subject_teacher_rel"""', '"""teacher_id"""', '"""subject_id"""', '"""Course-Subjects"""'], {}), "('subject.subject', 'subject_teacher_rel', 'teacher_id',\n 'subject_id', 'Course-Subjects')\n", (853, 946), False, 'from odoo import models, fields, api\n'), ((1027, 1118), 'odoo.fields.Many2one', 'fields.Many2one', (['"""school.school"""', '"""Campus"""'], {'related': '"""standard_id.school_id"""', 'store': '(True)'}), "('school.school', 'Campus', related='standard_id.school_id',\n store=True)\n", (1042, 1118), False, 'from odoo import models, fields, api\n'), ((1166, 1268), 'odoo.fields.Many2many', 'fields.Many2many', (['"""hr.employee.category"""', '"""employee_category_rel"""', '"""emp_id"""', '"""category_id"""', '"""Tags"""'], {}), "('hr.employee.category', 'employee_category_rel', 'emp_id',\n 'category_id', 'Tags')\n", (1182, 1268), False, 'from odoo import models, fields, api\n'), ((1357, 1403), 'odoo.fields.Many2one', 'fields.Many2one', (['"""hr.department"""', '"""Department"""'], {}), "('hr.department', 'Department')\n", (1372, 1403), False, 'from odoo import models, fields, api\n'), ((1420, 1447), 'odoo.fields.Boolean', 'fields.Boolean', (['"""Is Parent"""'], {}), "('Is Parent')\n", (1434, 1447), False, 'from odoo import models, fields, api\n'), ((1468, 1518), 'odoo.fields.Many2one', 'fields.Many2one', (['"""school.parent"""', '"""Related Parent"""'], {}), "('school.parent', 'Related Parent')\n", (1483, 1518), False, 'from odoo import models, fields, api\n'), ((1536, 1647), 'odoo.fields.Many2many', 'fields.Many2many', (['"""student.student"""', '"""students_teachers_parent_rel"""', '"""teacher_id"""', '"""student_id"""', '"""Children"""'], {}), "('student.student', 'students_teachers_parent_rel',\n 'teacher_id', 'student_id', 'Children')\n", (1552, 1647), False, 'from odoo import models, fields, api\n'), ((1766, 1793), 'odoo.fields.Char', 'fields.Char', (['"""Phone Number"""'], {}), "('Phone Number')\n", (1777, 1793), False, 'from odoo import models, fields, api\n'), ((1800, 1825), 'odoo.api.onchange', 'api.onchange', (['"""is_parent"""'], {}), "('is_parent')\n", (1812, 1825), False, 'from odoo import models, fields, api\n'), ((1975, 2004), 'odoo.api.onchange', 'api.onchange', (['"""stu_parent_id"""'], {}), "('stu_parent_id')\n", (1987, 2004), False, 'from odoo import models, fields, api\n'), ((4903, 4929), 'odoo.api.onchange', 'api.onchange', (['"""address_id"""'], {}), 
"('address_id')\n", (4915, 4929), False, 'from odoo import models, fields, api\n'), ((5173, 5202), 'odoo.api.onchange', 'api.onchange', (['"""department_id"""'], {}), "('department_id')\n", (5185, 5202), False, 'from odoo import models, fields, api\n'), ((5468, 5491), 'odoo.api.onchange', 'api.onchange', (['"""user_id"""'], {}), "('user_id')\n", (5480, 5491), False, 'from odoo import models, fields, api\n'), ((5714, 5739), 'odoo.api.onchange', 'api.onchange', (['"""school_id"""'], {}), "('school_id')\n", (5726, 5739), False, 'from odoo import models, fields, api\n')] |
""" Abstract base class for a simple GA
This GA will always seek to maximize fitness
"""
from abc import abstractmethod, ABC
import os
import pickle
import random
from totter.api.qwop import QwopEvaluator, QwopStrategy
from totter.evolution.Individual import Individual
from totter.evolution.Population import Population
import totter.utils.storage as storage
class GeneticAlgorithm(ABC):
def __init__(self,
eval_time_limit=240,
pop_size=20,
cx_prob=0.9,
mt_prob=0.05,
steady_state=True,
population_seeding_pool=None,
seeding_time_limit=60,
skip_init=False):
self.eval_time_limit = eval_time_limit
self.total_evaluations = 0
self.qwop_evaluator = QwopEvaluator(time_limit=self.eval_time_limit)
self.pop_size = pop_size
self.cx_prob = cx_prob
self.mt_prob = mt_prob
self.steady_state = steady_state
self.population_seeding_pool = population_seeding_pool
self.seeding_time_limit = seeding_time_limit
if not skip_init:
if population_seeding_pool is None:
# create a random population
individuals = [Individual(self.generate_random_genome()) for i in range(0, self.pop_size)]
for indv in individuals:
self._evaluate(indv)
self.population = Population(individuals)
else:
self.population = self.seed_population(population_seeding_pool, time_limit=seeding_time_limit)
def get_configuration(self):
return {
'eval_time_limit': self.eval_time_limit,
'pop_size': self.pop_size,
'cx_prob': self.cx_prob,
'mt_prob': self.mt_prob,
'steady_state': self.steady_state,
'population_seeding_pool': self.population_seeding_pool,
'seeding_time_limit': self.seeding_time_limit
}
def seed_population(self, pool_size, time_limit):
""" Creates a Population using the best runners from a pool of randomly-generated runners
This selects the best `self.pop_size` Individuals from a pool of randomly generated individuals, using
distance achieved as the selection criterion.
If the seeding procedure has already been run, then the individuals will instead be loaded from disk.
Args:
pool_size (int): the size of the randomly generated pool from which the initial population will be drawn
time_limit (int): time limit (in seconds) for each evaluation in the pool
Returns:
totter.evolution.Population.Population: Population seeded with good runners
"""
population_filepath = storage.get(os.path.join(self.__class__.__name__, 'population_seeds'))
population_file = os.path.join(population_filepath, f'seed_{pool_size}_{self.pop_size}.tsd')
# if the population has not previously been seeded, then generate the seeded pop
if not os.path.exists(population_file):
# temporarily set time limit
default_time_limit = self.qwop_evaluator.simulator.time_limit
self.qwop_evaluator.simulator.time_limit = time_limit
# generate pool of random individuals
pool = [Individual(self.generate_random_genome()) for i in range(0, pool_size)]
candidates = list()
for indv in pool:
# custom evaluation
phenotype = self.genome_to_phenotype(indv.genome)
strategy = QwopStrategy(execution_function=phenotype)
distance, run_time = self.qwop_evaluator.evaluate(strategy)[0]
indv.fitness = self.compute_fitness(distance, run_time)
candidates.append((indv, distance))
# sort by descending distance run
sorted_candidates = sorted(candidates, key=lambda c: -c[1])
avg = sum(map(lambda c: c[1], candidates)) / pool_size
print(f'average fitness of pool: {avg}')
# grab the ones who ran farthest
best_indvs = sorted_candidates[:self.pop_size]
print(f'Best indvs: \n {best_indvs}')
avg = sum(map(lambda c: c[1], best_indvs)) / len(best_indvs)
print(f'average fitness of selected pop: {avg}')
best_indvs = list(map(lambda c: c[0], best_indvs))
# save the individuals found
with open(population_file, 'wb') as data_file:
pickle.dump(best_indvs, data_file)
# reset time limit to its normal value
self.qwop_evaluator.simulator.time_limit = default_time_limit
# load best_individuals from a file
with open(population_file, 'rb') as data_file:
best_indvs = pickle.load(data_file)
return Population(best_indvs)
def advance(self):
""" Advances the GA by one generation
For generational GAs, a generation will replace the entire population.
For a steady-state GA, a generation will only replace two members of the population.
Returns: None
"""
# select parents
if self.steady_state:
parents = self.select_parents(self.population, 2)
else:
parents = self.select_parents(self.population, self.pop_size)
# make children using crossover
offspring = list()
for parent1, parent2 in zip(parents[::2], parents[1::2]):
if random.random() < self.cx_prob:
child1_genome, child2_genome = self.crossover(parent1.genome, parent2.genome)
offspring.append(child1_genome)
offspring.append(child2_genome)
else:
offspring.append(parent1.genome)
offspring.append(parent2.genome)
# mutate then repair
for idx in range(0, len(offspring)):
child_genome = offspring[idx]
if random.random() < self.mt_prob:
child_genome = self.mutate(child_genome)
child_genome = self.repair(child_genome)
            # even if the child wasn't mutated, its fitness needs to be re-evaluated
child = Individual(genome=child_genome)
self._evaluate(child)
offspring[idx] = child
# update population
if self.steady_state:
# replace selected parents with children
for child in offspring:
replacement_index = self.replace(self.population, child)
if replacement_index is not None:
self.population.replace(replacement_index, child)
else:
self.population = Population(offspring)
def _evaluate(self, individual):
""" Evaluates an indvidual using the QwopEvaluator and updates the individual's fitness
Args:
individual (Individual): the indvidual to evaluate
Returns: None
"""
phenotype = self.genome_to_phenotype(individual.genome)
strategy = QwopStrategy(execution_function=phenotype)
distance, run_time = self.qwop_evaluator.evaluate(strategy)[0]
individual.fitness = self.compute_fitness(distance, run_time)
self.total_evaluations += 1
@abstractmethod
def generate_random_genome(self):
""" Generates a random genome
Returns:
object: randomly generated genome
"""
pass
@abstractmethod
def genome_to_phenotype(self, genome):
""" Convert a genome to a function that plays QWOP
For example, if the genome is [W, Q, P], then the phenotype might be a function that presses 'W',
then presses 'Q', then presses 'P'.
Returns:
function: function that implements the strategy suggested by the genome
"""
pass
@abstractmethod
def compute_fitness(self, distance_run, run_time):
""" Computes an individual's fitness from the distance run and the time it took
Args:
distance_run (float): distance run in the QWOP simulator
run_time (float): time in seconds that it took to run to `distance`
Returns:
float: computed fitness
"""
pass
@abstractmethod
def select_parents(self, population, n):
""" Selects `n` members for parenthood from `population`
Args:
population (totter.evolution.Population.Population):
the current population
n (int):
the number of parents to select
Returns:
list<Individual>: the individuals selected for parenthood from the given population
"""
pass
@abstractmethod
def crossover(self, parent1, parent2):
""" Crossover parent1 with parent2 and generate two offspring
Args:
parent1 (iterable): the genome of the first parent
parent2 (iterable): the genome of the second parent
Returns:
iterable, iterable: genome of child 1, genome of child 2
"""
pass
@abstractmethod
def mutate(self, genome):
""" Perform mutation on the provided genome
Args:
genome (object): genome to mutate
Returns:
object: mutated genome
"""
pass
@abstractmethod
def repair(self, genome):
""" Repair a genome after crossover or mutation
Args:
genome (object): genome to repair
Returns:
object: genome with repaired contents
"""
pass
@abstractmethod
def replace(self, population, candidate):
""" Select a member of the population which will be replaced by `candidate`
This method should return the index of the population member to replace.
It may also return None, which indicates that `candidate` should be discarded instead of replacing a member
of the population
Args:
            population (list<Individual>):
list of Individuals in the population. Each individual has a genome and a fitness.
candidate (Individual): Individual which will replace the selected member
Returns:
int or None:
index of population member to be replaced by `candidate`,
or None if the replacement should not occur
"""
pass
| [
"os.path.exists",
"pickle.dump",
"os.path.join",
"pickle.load",
"totter.evolution.Individual.Individual",
"random.random",
"totter.evolution.Population.Population",
"totter.api.qwop.QwopStrategy",
"totter.api.qwop.QwopEvaluator"
] | [((818, 864), 'totter.api.qwop.QwopEvaluator', 'QwopEvaluator', ([], {'time_limit': 'self.eval_time_limit'}), '(time_limit=self.eval_time_limit)\n', (831, 864), False, 'from totter.api.qwop import QwopEvaluator, QwopStrategy\n'), ((2908, 2982), 'os.path.join', 'os.path.join', (['population_filepath', 'f"""seed_{pool_size}_{self.pop_size}.tsd"""'], {}), "(population_filepath, f'seed_{pool_size}_{self.pop_size}.tsd')\n", (2920, 2982), False, 'import os\n'), ((4913, 4935), 'totter.evolution.Population.Population', 'Population', (['best_indvs'], {}), '(best_indvs)\n', (4923, 4935), False, 'from totter.evolution.Population import Population\n'), ((7127, 7169), 'totter.api.qwop.QwopStrategy', 'QwopStrategy', ([], {'execution_function': 'phenotype'}), '(execution_function=phenotype)\n', (7139, 7169), False, 'from totter.api.qwop import QwopEvaluator, QwopStrategy\n'), ((2823, 2880), 'os.path.join', 'os.path.join', (['self.__class__.__name__', '"""population_seeds"""'], {}), "(self.__class__.__name__, 'population_seeds')\n", (2835, 2880), False, 'import os\n'), ((3088, 3119), 'os.path.exists', 'os.path.exists', (['population_file'], {}), '(population_file)\n', (3102, 3119), False, 'import os\n'), ((4874, 4896), 'pickle.load', 'pickle.load', (['data_file'], {}), '(data_file)\n', (4885, 4896), False, 'import pickle\n'), ((6288, 6319), 'totter.evolution.Individual.Individual', 'Individual', ([], {'genome': 'child_genome'}), '(genome=child_genome)\n', (6298, 6319), False, 'from totter.evolution.Individual import Individual\n'), ((6774, 6795), 'totter.evolution.Population.Population', 'Population', (['offspring'], {}), '(offspring)\n', (6784, 6795), False, 'from totter.evolution.Population import Population\n'), ((1461, 1484), 'totter.evolution.Population.Population', 'Population', (['individuals'], {}), '(individuals)\n', (1471, 1484), False, 'from totter.evolution.Population import Population\n'), ((3636, 3678), 'totter.api.qwop.QwopStrategy', 'QwopStrategy', ([], {'execution_function': 'phenotype'}), '(execution_function=phenotype)\n', (3648, 3678), False, 'from totter.api.qwop import QwopEvaluator, QwopStrategy\n'), ((4588, 4622), 'pickle.dump', 'pickle.dump', (['best_indvs', 'data_file'], {}), '(best_indvs, data_file)\n', (4599, 4622), False, 'import pickle\n'), ((5569, 5584), 'random.random', 'random.random', ([], {}), '()\n', (5582, 5584), False, 'import random\n'), ((6039, 6054), 'random.random', 'random.random', ([], {}), '()\n', (6052, 6054), False, 'import random\n')] |
#!/usr/bin/env python
import setuptools
with open('README.md', 'r') as fp:
long_description = fp.read()
setuptools.setup(
name='predecon-exioreed',
version='0.1.1',
author='<NAME>',
author_email='<EMAIL>',
description='PreDeCon - An Implementation in Python, Compatible With Scikit-Learn',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/exioReed/PreDeCon.git',
packages=setuptools.find_packages(),
install_requires=[
'joblib>=0.14.0',
'numpy>=1.15.4',
'scikit-learn>=0.22.1',
],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
],
python_requires='>=3.6',
) | [
"setuptools.find_packages"
] | [((472, 498), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (496, 498), False, 'import setuptools\n')] |
#!/usr/bin/python3
__author__ = '<NAME>'
__version__ = '2.0.0'
__name__ = 'cli.py'
__license__ = 'GPL2'
__description__ = 'CLI interface for Simple Cypher.'
import os
# import sys
import simple_cypher
from cmd import Cmd
#cipher = "F daFJeefn n LB-eheadty.AA1lta oiwyGW18 r a-8"
# cipher = "iveghny ynxr"
# if len(sys.argv) == 2:
# cipher = sys.argv[1]
# simp_ciph = simple_cipher.simple_cipher(cipher)
# simp_ciph.decrypt()
# cipher = "iveghny ynxr"
###########################
###########################
### Max Min Prompt Class
class Cypher_Prompt(Cmd):
prompt = "\U0001F63C > "
_verbosity = 0
_cipher = ''
_external = 0
def do_decode(self,args):
'Help text'
cipher = self._cipher
verbosity = self._verbosity
if self._external:
os.system('nohup xfce4-terminal -H -x python3 simple_cypher.py {:s} "{:s}" 2>/dev/null&'
.format(" -v"*verbosity,cipher))
print(self.prompt)
else:
simp_ciph = simple_cypher.simple_cypher(cipher,verbosity)
simp_ciph.decrypt()
# def do_decode_external(self,args):
# 'Help text'
# cipher = "iveghny ynxr"
# verbosity = self._verbosity
# verbosity = 2
#
# # os.system('xfce4-terminal -H -x python3 {:s}/simple_cypher.py -v -v -v -v "iveghny ynxr"'.format(os.getcwd())
#
def do_setup(self,args):
'Help text'
cipher = ''
cipher = str(input('Cipher:'))
if not cipher:
self._cipher = ''
else:
self._cipher = cipher
verb = 0
try:
verb = int(input('Verbosity[1-4][0]: '))
if verb:
self._verbosity = verb
except ValueError:
self._verbosity = 0
self._external = 0
external = str(input("Launch externally[n]: "))
if external.lower() == 'y':
self._external = 1
print("Cipher: {}".format(self._cipher))
print("Vebose: {}".format(self._verbosity))
print("Extern: {}".format(self._external))
def do_exit(self, args):
'Help text'
## Clear screen
print('\x1bc')
print("Exiting...")
exit()
def do_bye(self, args):
'Help text'
self.do_exit(args)
return False
## Catch all
def default(self, args):
print("Bad command: %s" %args)
######################
## PRIVATE FUNCTION ##
######################
def set_verbosity(self,verbosity):
self._verbosity = verbosity
def set_cipher(self,cipher):
self._cipher = cipher
def emptyline(self):
pass
cp = Cypher_Prompt()
cp.cmdloop()
# xfce-terminal -x nohup {cmd}
# choose pop up external terminal
# - xterm
# - terminator
# - xfce4-terminal
# xfce4-terminal -H -x nmap -sn 192.168.1.1 | [
"simple_cypher.simple_cypher"
] | [((1030, 1076), 'simple_cypher.simple_cypher', 'simple_cypher.simple_cypher', (['cipher', 'verbosity'], {}), '(cipher, verbosity)\n', (1057, 1076), False, 'import simple_cypher\n')] |
# ++++++++++++++++++++++++++++++++++++++ #
# Functions & classes for building model #
# ++++++++++++++++++++++++++++++++++++++ #
# Import libraries
import argparse
import numpy as np
import json
import os
import time
import torch
import torch.nn.functional as F
#from torchvision import datasets, transforms, models
from torchvision import models
#from torch.utils.data import DataLoader
from torch import nn
from torch import optim
# Define class for model classifier
class Classifier(nn.Module):
''' Creates deep learning model to use as classifier/fc
in pre-trained PyTorch model
'''
# Define network architechture
def __init__(self, num_inputs, num_outputs, num_hidden):
super().__init__()
# Hidden layers
if num_hidden is not None:
self.hidden_layers = nn.ModuleList([nn.Linear(num_inputs, num_hidden[0])])
hidden_sizes = zip(num_hidden[:-1], num_hidden[1:])
self.hidden_layers.extend([nn.Linear(h1, h2) for h1, h2 in hidden_sizes])
# Output
self.output = nn.Linear(num_hidden[-1], num_outputs)
else:
# Output
self.output = nn.Linear(num_inputs, num_outputs)
# Define forward pass
def forward(self, x):
try:
for linear in self.hidden_layers:
x = F.relu(linear(x))
x = self.output(x)
except AttributeError:
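            # num_hidden was None, so no hidden layers were created; apply only the output layer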
x = self.output(x)
return F.log_softmax(x, dim = 1)
# Create model from pre-trained architecture
def load_architecture(architecture, num_outputs, num_hidden = None):
''' Loads model architecture of pre-trained PyTorch model
        and changes the model's classifier/fc object based on
classes in given data set and hidden layers
'''
# Load model
print('Loading architecture of pre-trained {:s} model ...'.format(architecture))
model = models.__dict__[architecture](pretrained = True)
# Freeze parameters of pretrained model
for param in model.parameters():
param.requires_grad = False
# Get number of classifier input features & change classifier (start with default)
if architecture.startswith('vgg'):
num_inputs = model.classifier[0].in_features
model.classifier = Classifier(num_inputs, num_outputs, num_hidden)
elif architecture.startswith('alex'):
num_inputs = model.classifier[1].in_features
model.classifier = Classifier(num_inputs, num_outputs, num_hidden)
elif architecture.startswith('dense'):
num_inputs = model.classifier.in_features
model.classifier = Classifier(num_inputs, num_outputs, num_hidden)
elif architecture.startswith('incep') or architecture.startswith('res'):
num_inputs = model.fc.in_features
model.fc = Classifier(num_inputs, num_outputs, num_hidden)
return model
# Define optimizer
def load_optimizer(model, architecture, learning_rate):
''' Loads optimizer for model training based on
architecture of pre-trained model
'''
if architecture.startswith('alex') or architecture.startswith('dense') or architecture.startswith('vgg'):
optimizer = optim.Adam(model.classifier.parameters(), lr = learning_rate)
else:
optimizer = optim.Adam(model.fc.parameters(), lr = learning_rate)
return optimizer
# Define network training function
def run_training(model, train_loader, valid_loader, criterion, optimizer, num_epochs, gpu = False):
''' Runs deep learning model training on training data and reports
loss and accuracy on training as well as validation data
'''
start_time = time.time()
training_steps = 0
#num_epochs = int(num_epochs)
# Change model to CUDA (if available)
if gpu:
torch_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch_device.type == 'cpu':
print('Warning: GPU training not available.')
else:
torch_device = torch.device("cpu")
model.to(torch_device)
# Run training
print('Starting model training on {:s} ...'.format(torch_device.type.upper()))
model.train()
for e in range(num_epochs):
running_loss = 0.0
running_corrects = 0
running_totals = 0
for ii, (images, labels) in enumerate(train_loader):
training_steps += 1
# Change inputs/labels to CUDA (if available, see above)
images, labels = images.to(torch_device), labels.to(torch_device)
# Set gradients back to zero
optimizer.zero_grad()
# Forward and backward passes
output = model.forward(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
# Calculate training loss & accuracy on current batch
running_loss += loss.item()
_, preds = torch.max(output, 1)
running_corrects += torch.sum(preds == labels.data).item()
running_totals += labels.size(0)
            # Report progress every 25 iterations
if training_steps % 25 == 0:
# Set model to validation mode (done in validation method) & evaluate current performance
valid_loss, valid_accuracy = run_validation(model, valid_loader, criterion, torch_device)
# Print current performance
print("Epoch: {}/{} ...".format(
e + 1, num_epochs))
print(" Train Loss: {:.3f} Train Accuracy: {:.3f}".format(
running_loss / 25, running_corrects / running_totals))
print(" Valid Loss: {:.3f} Valid Accuracy: {:.3f}\n".format(
valid_loss, valid_accuracy))
# Set running variables of current iteration back to 0
running_loss = 0.0
running_corrects = 0
running_totals = 0
# Set model back to training mode (just to be sure, but is also done in "run_validation" method)
model.train()
# Training duration
train_time = time.time() - start_time
print('Training complete.\n Total training time: {:.0f}m {:.0f}s'.format(
train_time // 60, train_time % 60))
# Return model
return model
# Define validation function for training
def run_validation(model, valid_loader, criterion, torch_device):
''' Runs validation pass on full set of testing data
and returns loss and accuracy
'''
loss = 0
total = 0
correct = 0
# Change model to training device
model.to(torch_device)
# Set model to evaluation mode & turn off gradients for validation
model.eval()
with torch.no_grad():
for images, labels in valid_loader:
# Change inputs/labels to CUDA (if available)
images, labels = images.to(torch_device), labels.to(torch_device)
# Forward pass
output = model.forward(images)
# Calculate loss on current batch
loss += criterion(output, labels).item()
# Calculate number of correctly predicted labels & batch size
_, preds = torch.max(output.data, 1)
#correct += (predicted == labels).sum().item()
correct += torch.sum(preds == labels.data).item()
total += labels.size(0)
# Set model back to training mode
model.train()
return (loss / len(valid_loader)), (correct / total)
# Save trained model as checkpoint
def save_checkpoint(model, optimizer, epochs, class_to_idx, save_dir):
''' Saves checkpoint model and parameters necessary for rebuilding
model in order to resume training or to use for inference
'''
# Attach mapping of classes to indeces
model.class_to_idx = class_to_idx
# Save checkpoint
checkpoint = {
'model' : model,
'model_state_dict': model.state_dict(),
'optimizer': optimizer,
'optimizer_state_dict': optimizer.state_dict(),
'class_to_idx': model.class_to_idx,
'epochs': epochs
}
save_dir = save_dir + '/model_checkpoint.pth'
print('Saving model checkpoint at {:s} ...'.format(save_dir))
torch.save(checkpoint, save_dir)
# Define function to loads a checkpoint and rebuild model
def load_checkpoint(checkpoint_path):
''' Loads checkpoint model from file path and rebuilds model '''
# Load checkpoint
checkpoint = torch.load(checkpoint_path, map_location = lambda storage, loc: storage)
# Re-define model & freeze parameters
model = checkpoint['model']
for param in model.parameters():
param.requires_grad = False
model.class_to_idx = checkpoint['class_to_idx']
model.load_state_dict(checkpoint['model_state_dict'])
# Load remaining information
optimizer = checkpoint['optimizer']
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epochs = checkpoint['epochs']
print('Loading model checkpoint from \'{:s}\' ...'.format(checkpoint_path))
return model, optimizer, epochs
# Define function for predicting image class(es)
def predict(image, model, topk = 1, category_names = None, gpu = False):
    ''' Predict the class (or classes) of a pre-processed image
using a trained deep learning model.
'''
# Pre-process image
image = torch.from_numpy(image).float()
image.unsqueeze_(0)
# Change model to CUDA (if available)
if gpu:
torch_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch_device.type == 'cpu':
print('Warning: GPU prediction not available.')
else:
torch_device = torch.device("cpu")
model.to(torch_device)
image = image.to(torch_device)
# Run class prediction with forward pass
print('Starting prediction on {:s} ...'.format(torch_device.type.upper()))
# Turn off gradients for validation, saves memory and computations
model.eval()
with torch.no_grad():
output = model.forward(image)
# Get probabilities from log probabilities
probs = torch.exp(output).cpu()
# Extract top K probabilities and corresponding indeces
probs, labels = probs.topk(int(topk))
#probs, labels = probs.numpy().tolist()[0], labels.numpy().tolist()[0]
probs, labels = probs.numpy()[0], labels.numpy().tolist()[0]
# Invert key value pairs from class_to_idx and save classes
idx_to_class = {val: key for key, val in model.class_to_idx.items()}
classes = [idx_to_class[key] for key in labels]
# Return top K probabilites and corresponding classes
# If mapping file is given return real names instead of classes
if category_names is not None:
# Get real names
with open(category_names, 'r') as f:
cat_to_name = json.load(f)
flowers = [cat_to_name[c] for c in classes]
# Return top K probabilites and corresponding names
print('Probabilities:', *probs, sep = ', ')
print('Flowers:', *flowers, sep = ', ')
return probs, flowers
else:
# Return top K probabilites and corresponding classes
print('Probabilities:', *probs, sep = ' ')
print('Flowers:', *classes, sep = ' ')
return probs, classes | [
"torch.load",
"torch.max",
"torch.from_numpy",
"torch.exp",
"json.load",
"torch.cuda.is_available",
"torch.sum",
"torch.nn.functional.log_softmax",
"torch.save",
"torch.nn.Linear",
"torch.no_grad",
"time.time",
"torch.device"
] | [((3714, 3725), 'time.time', 'time.time', ([], {}), '()\n', (3723, 3725), False, 'import time\n'), ((8449, 8481), 'torch.save', 'torch.save', (['checkpoint', 'save_dir'], {}), '(checkpoint, save_dir)\n', (8459, 8481), False, 'import torch\n'), ((8693, 8763), 'torch.load', 'torch.load', (['checkpoint_path'], {'map_location': '(lambda storage, loc: storage)'}), '(checkpoint_path, map_location=lambda storage, loc: storage)\n', (8703, 8763), False, 'import torch\n'), ((1501, 1524), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (1514, 1524), True, 'import torch.nn.functional as F\n'), ((4058, 4077), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4070, 4077), False, 'import torch\n'), ((6264, 6275), 'time.time', 'time.time', ([], {}), '()\n', (6273, 6275), False, 'import time\n'), ((6885, 6900), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6898, 6900), False, 'import torch\n'), ((9937, 9956), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (9949, 9956), False, 'import torch\n'), ((10241, 10256), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10254, 10256), False, 'import torch\n'), ((1095, 1133), 'torch.nn.Linear', 'nn.Linear', (['num_hidden[-1]', 'num_outputs'], {}), '(num_hidden[-1], num_outputs)\n', (1104, 1133), False, 'from torch import nn\n'), ((1195, 1229), 'torch.nn.Linear', 'nn.Linear', (['num_inputs', 'num_outputs'], {}), '(num_inputs, num_outputs)\n', (1204, 1229), False, 'from torch import nn\n'), ((5011, 5031), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (5020, 5031), False, 'import torch\n'), ((7400, 7425), 'torch.max', 'torch.max', (['output.data', '(1)'], {}), '(output.data, 1)\n', (7409, 7425), False, 'import torch\n'), ((9604, 9627), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (9620, 9627), False, 'import torch\n'), ((10360, 10377), 'torch.exp', 'torch.exp', (['output'], {}), '(output)\n', (10369, 10377), False, 'import torch\n'), ((11083, 11095), 'json.load', 'json.load', (['f'], {}), '(f)\n', (11092, 11095), False, 'import json\n'), ((3890, 3915), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3913, 3915), False, 'import torch\n'), ((9767, 9792), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9790, 9792), False, 'import torch\n'), ((850, 886), 'torch.nn.Linear', 'nn.Linear', (['num_inputs', 'num_hidden[0]'], {}), '(num_inputs, num_hidden[0])\n', (859, 886), False, 'from torch import nn\n'), ((992, 1009), 'torch.nn.Linear', 'nn.Linear', (['h1', 'h2'], {}), '(h1, h2)\n', (1001, 1009), False, 'from torch import nn\n'), ((5064, 5095), 'torch.sum', 'torch.sum', (['(preds == labels.data)'], {}), '(preds == labels.data)\n', (5073, 5095), False, 'import torch\n'), ((7508, 7539), 'torch.sum', 'torch.sum', (['(preds == labels.data)'], {}), '(preds == labels.data)\n', (7517, 7539), False, 'import torch\n')] |
import textwrap
import click
from . import __version__, wikipedia
@click.command()
@click.version_option(version=__version__)
def main():
"""The wikifun Python project."""
data = wikipedia.random_page()
title = data["title"]
extract = data["extract"]
click.secho(title, fg="green")
click.echo(textwrap.fill(extract))
| [
"textwrap.fill",
"click.command",
"click.version_option",
"click.secho"
] | [((71, 86), 'click.command', 'click.command', ([], {}), '()\n', (84, 86), False, 'import click\n'), ((88, 129), 'click.version_option', 'click.version_option', ([], {'version': '__version__'}), '(version=__version__)\n', (108, 129), False, 'import click\n'), ((277, 307), 'click.secho', 'click.secho', (['title'], {'fg': '"""green"""'}), "(title, fg='green')\n", (288, 307), False, 'import click\n'), ((323, 345), 'textwrap.fill', 'textwrap.fill', (['extract'], {}), '(extract)\n', (336, 345), False, 'import textwrap\n')] |
import sys
foreground={
'black': '\033[30m',
'red': '\033[31m',
'green': '\033[32m',
'yellow': '\033[33m',
'blue': '\033[34m',
'magenta': '\033[35m',
'cyan': '\033[36m',
'white': '\033[37m'
}
background={
'black': '\033[40m',
'red': '\033[41m',
'green': '\033[42m',
'yellow': '\033[43m',
'blue': '\033[44m',
'magenta': '\033[45m',
'cyan': '\033[46m',
'white': '\033[47m'
}
def set_colors(fg,bg,bold):
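    # Build the ANSI escape sequence for the requested foreground colour and bold attribute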
cmd=''
if fg in foreground:
cmd=cmd+foreground.get(fg)
#if bg in background:
# cmd=cmd+background.get(bg)
if bold:
cmd=cmd+'\033[1m'
else:
cmd = cmd + '\033[22m'
sys.stdout.write(cmd)
| [
"sys.stdout.write"
] | [((719, 740), 'sys.stdout.write', 'sys.stdout.write', (['cmd'], {}), '(cmd)\n', (735, 740), False, 'import sys\n')] |
import re
from io import StringIO
from html.parser import HTMLParser
from feedparser_wrapper.html_entities import replace_html_escapes
class MLStripper(HTMLParser):
def __init__(self) -> None:
super().__init__()
self.reset()
self.strict = False
self.convert_charrefs = True
self.text = StringIO()
def handle_data(self, d) -> None:
self.text.write(d)
def get_data(self) -> str:
return self.text.getvalue()
def _strip_once(text: str) -> str:
"""
Internal tag stripping utility used by strip_tags.
"""
s = MLStripper()
s.feed(text)
s.close()
return s.get_data()
def strip_tags(text: str) -> str:
"""Return the given HTML with all tags stripped."""
# Note: in typical case this loop executes _strip_once once. Loop condition
# is redundant, but helps to reduce number of executions of _strip_once.
text = str(text)
while '<' in text and '>' in text:
new_value = _strip_once(text)
if text.count('<') == new_value.count('<'):
# _strip_once wasn't able to detect more tags.
break
text = new_value
return text
def replace_spaces_with_one_space(text: str) -> str:
return re.sub(r'\s+', ' ', str(text))
def strip_spaces_between_tags(value: str) -> str:
"""Return the given HTML with spaces between tags removed."""
return re.sub(r'>\s+<', '><', str(value))
def sanitize(text: str) -> str:
"""Convert text to plain."""
text = strip_spaces_between_tags(text)
text = strip_tags(text)
text = replace_spaces_with_one_space(text)
text = replace_html_escapes(text)
return text
| [
"io.StringIO",
"feedparser_wrapper.html_entities.replace_html_escapes"
] | [((1636, 1662), 'feedparser_wrapper.html_entities.replace_html_escapes', 'replace_html_escapes', (['text'], {}), '(text)\n', (1656, 1662), False, 'from feedparser_wrapper.html_entities import replace_html_escapes\n'), ((333, 343), 'io.StringIO', 'StringIO', ([], {}), '()\n', (341, 343), False, 'from io import StringIO\n')] |
from django.conf.urls.defaults import patterns, include, url
urlpatterns = patterns('about.views',
(r'^$', 'index'),
)
| [
"django.conf.urls.defaults.patterns"
] | [((76, 116), 'django.conf.urls.defaults.patterns', 'patterns', (['"""about.views"""', "('^$', 'index')"], {}), "('about.views', ('^$', 'index'))\n", (84, 116), False, 'from django.conf.urls.defaults import patterns, include, url\n')] |
"""Tests for the fuzzy_penguin/fuzz_string_list.py file"""
import os
import pytest
from fuzzy_penguin import fuzz_string_list
@pytest.fixture
def fuzz_string_object():
return fuzz_string_list.FuzzStringList()
def test_set_fuzz_file_no_fuzz_file_specified(fuzz_string_object):
fuzz_string_object._set_fuzz_file()
assert 'default_fuzz_list.txt' in fuzz_string_object._file_to_load
def test_set_fuzz_file_fuzz_file_specified_and_populated(fuzz_string_object):
dir_path = os.path.dirname(os.path.realpath(__file__))
fuzz_string_object._set_fuzz_file(os.path.join(dir_path,
'simple_fuzz_file.txt'))
assert 'simple_fuzz_file.txt' in fuzz_string_object._file_to_load
def test_set_fuzz_file_fuzz_file_specified_but_empty(fuzz_string_object):
dir_path = os.path.dirname(os.path.realpath(__file__))
with pytest.raises(EOFError):
fuzz_string_object._set_fuzz_file(os.path.join(dir_path,
'blank_fuzz_file.txt'))
def test_set_fuzz_file_file_does_not_exist(fuzz_string_object):
with pytest.raises(FileNotFoundError):
fuzz_string_object._set_fuzz_file('does_not_exist.txt')
def test_load_fuzz_file_correct_number_strings_loaded(fuzz_string_object):
dir_path = os.path.dirname(os.path.realpath(__file__))
fuzz_string_object._set_fuzz_file(os.path.join(dir_path,
'simple_fuzz_file.txt'))
fuzz_string_object.load_fuzz_file()
assert len(fuzz_string_object.string_list) == 3
| [
"os.path.realpath",
"os.path.join",
"fuzzy_penguin.fuzz_string_list.FuzzStringList",
"pytest.raises"
] | [((184, 217), 'fuzzy_penguin.fuzz_string_list.FuzzStringList', 'fuzz_string_list.FuzzStringList', ([], {}), '()\n', (215, 217), False, 'from fuzzy_penguin import fuzz_string_list\n'), ((509, 535), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (525, 535), False, 'import os\n'), ((575, 621), 'os.path.join', 'os.path.join', (['dir_path', '"""simple_fuzz_file.txt"""'], {}), "(dir_path, 'simple_fuzz_file.txt')\n", (587, 621), False, 'import os\n'), ((851, 877), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (867, 877), False, 'import os\n'), ((888, 911), 'pytest.raises', 'pytest.raises', (['EOFError'], {}), '(EOFError)\n', (901, 911), False, 'import pytest\n'), ((1132, 1164), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (1145, 1164), False, 'import pytest\n'), ((1338, 1364), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1354, 1364), False, 'import os\n'), ((1404, 1450), 'os.path.join', 'os.path.join', (['dir_path', '"""simple_fuzz_file.txt"""'], {}), "(dir_path, 'simple_fuzz_file.txt')\n", (1416, 1450), False, 'import os\n'), ((955, 1000), 'os.path.join', 'os.path.join', (['dir_path', '"""blank_fuzz_file.txt"""'], {}), "(dir_path, 'blank_fuzz_file.txt')\n", (967, 1000), False, 'import os\n')] |
import logging
import torch
import numpy as np
import os.path as osp
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class SiameseVgg3d(torch.nn.Module):
"""
TODO
"""
def __init__(self,
input_size,
input_fmaps=1,
fmaps=32,
fmaps_max=512,
output_features=10,
downsample_factors=[(2, 2, 2), (2, 2, 2), (2, 2, 2), (2, 2, 2)]):
"""
TODO
Args:
input_size:
input_fmaps:
fmaps:
downsample_factors:
"""
super(SiameseVgg3d, self).__init__()
downsample_factors = np.array(downsample_factors)
current_fmaps = input_fmaps
current_size = np.array(input_size)
features = []
for i in range(len(downsample_factors)):
features += [
torch.nn.Conv3d(
in_channels=current_fmaps,
out_channels=fmaps,
kernel_size=3,
padding=1),
torch.nn.BatchNorm3d(
num_features=fmaps),
torch.nn.ReLU(
inplace=True),
torch.nn.Conv3d(
in_channels=fmaps,
out_channels=fmaps,
kernel_size=3,
padding=1),
torch.nn.BatchNorm3d(
num_features=fmaps),
torch.nn.ReLU(
inplace=True),
torch.nn.MaxPool3d(
kernel_size=tuple(downsample_factors[i]))
]
current_fmaps = fmaps
fmaps = min(fmaps_max, fmaps * 2)
size = np.floor(current_size / downsample_factors[i])
# TODO come up with a better rule
# assert np.all((size * downsample_factors[i]) == current_size), \
# "Can not downsample %s by chosen downsample factor" % current_size
current_size = size
logger.info(
f'VGG level {i}: ({current_size}), {current_fmaps} fmaps')
self.features = torch.nn.Sequential(*features)
num_features = int(
current_size[0] * current_size[1] * current_size[2] * current_fmaps)
logger.info(f'inputs to fc: {num_features}')
fc_size = [2048]
fully_connected = [
torch.nn.Linear(
num_features,
fc_size[0]),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(),
torch.nn.Linear(
fc_size[0],
output_features)
]
logger.info(f'fc level 0: {fc_size[0]}')
logger.info(f'output: {output_features}')
self.fully_connected = torch.nn.Sequential(*fully_connected)
def forward_once(self, x):
output = self.features(x)
output = output.view(output.size()[0], -1)
output = self.fully_connected(output)
return output
def forward(self, input1, input2):
output1 = self.forward_once(input1)
output2 = self.forward_once(input2)
return output1, output2
def train(self, mode=True):
"""
on top of pytorch's module mode, change the `requires_grad`
attribute of all module parameters
Args:
mode (bool): True = train, False = eval
Returns:
"""
ret = super().train(mode=mode)
for param in self.parameters():
param.requires_grad = mode
return ret
| [
"logging.getLogger",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.Sequential",
"numpy.floor",
"numpy.array",
"torch.nn.Linear",
"torch.nn.BatchNorm3d",
"torch.nn.Conv3d"
] | [((79, 106), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (96, 106), False, 'import logging\n'), ((691, 719), 'numpy.array', 'np.array', (['downsample_factors'], {}), '(downsample_factors)\n', (699, 719), True, 'import numpy as np\n'), ((779, 799), 'numpy.array', 'np.array', (['input_size'], {}), '(input_size)\n', (787, 799), True, 'import numpy as np\n'), ((2182, 2212), 'torch.nn.Sequential', 'torch.nn.Sequential', (['*features'], {}), '(*features)\n', (2201, 2212), False, 'import torch\n'), ((2823, 2860), 'torch.nn.Sequential', 'torch.nn.Sequential', (['*fully_connected'], {}), '(*fully_connected)\n', (2842, 2860), False, 'import torch\n'), ((1767, 1813), 'numpy.floor', 'np.floor', (['(current_size / downsample_factors[i])'], {}), '(current_size / downsample_factors[i])\n', (1775, 1813), True, 'import numpy as np\n'), ((2442, 2483), 'torch.nn.Linear', 'torch.nn.Linear', (['num_features', 'fc_size[0]'], {}), '(num_features, fc_size[0])\n', (2457, 2483), False, 'import torch\n'), ((2530, 2557), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2543, 2557), False, 'import torch\n'), ((2571, 2589), 'torch.nn.Dropout', 'torch.nn.Dropout', ([], {}), '()\n', (2587, 2589), False, 'import torch\n'), ((2603, 2647), 'torch.nn.Linear', 'torch.nn.Linear', (['fc_size[0]', 'output_features'], {}), '(fc_size[0], output_features)\n', (2618, 2647), False, 'import torch\n'), ((914, 1007), 'torch.nn.Conv3d', 'torch.nn.Conv3d', ([], {'in_channels': 'current_fmaps', 'out_channels': 'fmaps', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=current_fmaps, out_channels=fmaps, kernel_size=\n 3, padding=1)\n', (929, 1007), False, 'import torch\n'), ((1101, 1141), 'torch.nn.BatchNorm3d', 'torch.nn.BatchNorm3d', ([], {'num_features': 'fmaps'}), '(num_features=fmaps)\n', (1121, 1141), False, 'import torch\n'), ((1180, 1207), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1193, 1207), False, 'import torch\n'), ((1246, 1331), 'torch.nn.Conv3d', 'torch.nn.Conv3d', ([], {'in_channels': 'fmaps', 'out_channels': 'fmaps', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=fmaps, out_channels=fmaps, kernel_size=3, padding=1\n )\n', (1261, 1331), False, 'import torch\n'), ((1425, 1465), 'torch.nn.BatchNorm3d', 'torch.nn.BatchNorm3d', ([], {'num_features': 'fmaps'}), '(num_features=fmaps)\n', (1445, 1465), False, 'import torch\n'), ((1504, 1531), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1517, 1531), False, 'import torch\n')] |
from pyramid import testing
from pyramid.response import Response
from unittest import TestCase
from pyramid_restful.views import APIView
class MyView(APIView):
def get(self, request, *args, **kwargs):
return Response({'method': 'GET'})
def post(self, request, *args, **kwargs):
return Response({'method': 'POST', 'data': request.body})
class InitKwargsView(APIView):
def get(self, request, *args, **kwargs):
return Response({'name': self.name})
class ExceptionView(APIView):
def get(self, request, *args, **kwargs):
raise Exception('test exception')
class APIViewTests(TestCase):
def setUp(self):
self.test_view = MyView.as_view(name='MyView')
self.request = testing.DummyRequest()
def test_implemented_method_dispatch(self):
response = self.test_view(self.request)
expected = {'method': 'GET'}
assert response.status_code == 200
assert response.body == expected
def test_method_not_allowed(self):
self.request.method = 'PUT'
response = self.test_view(self.request)
assert response.status_code == 405
def test_initkwargs(self):
view = InitKwargsView.as_view(name='test')
response = view(self.request)
expected = {'name': 'test'}
assert response.body == expected
def test_raises_exception(self):
view = ExceptionView.as_view()
self.assertRaises(Exception, view, self.request)
def test_invalid_method_exception(self):
self.request.method = 'PUTZ'
response = self.test_view(self.request)
assert response.status_code == 405
def test_options_request(self):
self.request.method = 'OPTIONS'
response = self.test_view(self.request)
assert response.headers.get('Allow') == "GET, POST, OPTIONS"
| [
"pyramid.response.Response",
"pyramid.testing.DummyRequest"
] | [((226, 253), 'pyramid.response.Response', 'Response', (["{'method': 'GET'}"], {}), "({'method': 'GET'})\n", (234, 253), False, 'from pyramid.response import Response\n'), ((316, 366), 'pyramid.response.Response', 'Response', (["{'method': 'POST', 'data': request.body}"], {}), "({'method': 'POST', 'data': request.body})\n", (324, 366), False, 'from pyramid.response import Response\n'), ((461, 490), 'pyramid.response.Response', 'Response', (["{'name': self.name}"], {}), "({'name': self.name})\n", (469, 490), False, 'from pyramid.response import Response\n'), ((743, 765), 'pyramid.testing.DummyRequest', 'testing.DummyRequest', ([], {}), '()\n', (763, 765), False, 'from pyramid import testing\n')] |
#!/usr/bin/env python
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates how to create a topic with the Pub/Sub
Lite API. For more information, see the root level README.md and the
documentation at https://cloud.google.com/pubsub/lite/docs/topics.
"""
import argparse
def create_lite_topic(project_number, cloud_region, zone_id, topic_id, num_partitions):
# [START pubsublite_create_topic]
from google.api_core.exceptions import AlreadyExists
from google.cloud.pubsublite import AdminClient, Topic
from google.cloud.pubsublite.types import CloudRegion, CloudZone, TopicPath
from google.protobuf.duration_pb2 import Duration
# TODO(developer):
# project_number = 1122334455
# cloud_region = "us-central1"
# zone_id = "a"
# topic_id = "your-topic-id"
# num_partitions = 1
cloud_region = CloudRegion(cloud_region)
location = CloudZone(cloud_region, zone_id)
topic_path = TopicPath(project_number, location, topic_id)
topic = Topic(
name=str(topic_path),
partition_config=Topic.PartitionConfig(
# A topic must have at least one partition.
count=num_partitions,
# Set throughput capacity per partition in MiB/s.
capacity=Topic.PartitionConfig.Capacity(
# Set publish throughput capacity per partition to 4 MiB/s. Must be >= 4 and <= 16.
publish_mib_per_sec=4,
# Set subscribe throughput capacity per partition to 4 MiB/s. Must be >= 4 and <= 32.
subscribe_mib_per_sec=8,
),
),
retention_config=Topic.RetentionConfig(
# Set storage per partition to 30 GiB. This must be in the range 30 GiB-10TiB.
            # If the number of bytes stored in any of the topic's partitions grows beyond
# this value, older messages will be dropped to make room for newer ones,
# regardless of the value of `period`.
per_partition_bytes=30 * 1024 * 1024 * 1024,
# Allow messages to be retained for 7 days.
period=Duration(seconds=60 * 60 * 24 * 7),
),
)
client = AdminClient(cloud_region)
try:
response = client.create_topic(topic)
print(f"{response.name} created successfully.")
except AlreadyExists:
print(f"{topic_path} already exists.")
# [END pubsublite_create_topic]
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("project_number", help="Your Google Cloud Project Number")
parser.add_argument("cloud_region", help="Your Cloud Region, e.g. 'us-central1'")
parser.add_argument("zone_id", help="Your Zone ID, e.g. 'a'")
parser.add_argument("topic_id", help="Your topic ID")
parser.add_argument(
"num_partitions", type=int, help="Number of partitions in the topic"
)
args = parser.parse_args()
create_lite_topic(
args.project_number,
args.cloud_region,
args.zone_id,
args.topic_id,
args.num_partitions,
)
| [
"google.cloud.pubsublite.types.CloudZone",
"google.protobuf.duration_pb2.Duration",
"argparse.ArgumentParser",
"google.cloud.pubsublite.AdminClient",
"google.cloud.pubsublite.Topic.PartitionConfig.Capacity",
"google.cloud.pubsublite.types.CloudRegion",
"google.cloud.pubsublite.types.TopicPath"
] | [((1418, 1443), 'google.cloud.pubsublite.types.CloudRegion', 'CloudRegion', (['cloud_region'], {}), '(cloud_region)\n', (1429, 1443), False, 'from google.cloud.pubsublite.types import CloudRegion, CloudZone, TopicPath\n'), ((1459, 1491), 'google.cloud.pubsublite.types.CloudZone', 'CloudZone', (['cloud_region', 'zone_id'], {}), '(cloud_region, zone_id)\n', (1468, 1491), False, 'from google.cloud.pubsublite.types import CloudRegion, CloudZone, TopicPath\n'), ((1509, 1554), 'google.cloud.pubsublite.types.TopicPath', 'TopicPath', (['project_number', 'location', 'topic_id'], {}), '(project_number, location, topic_id)\n', (1518, 1554), False, 'from google.cloud.pubsublite.types import CloudRegion, CloudZone, TopicPath\n'), ((2731, 2756), 'google.cloud.pubsublite.AdminClient', 'AdminClient', (['cloud_region'], {}), '(cloud_region)\n', (2742, 2756), False, 'from google.cloud.pubsublite import AdminClient, Topic\n'), ((3019, 3122), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (3042, 3122), False, 'import argparse\n'), ((1825, 1903), 'google.cloud.pubsublite.Topic.PartitionConfig.Capacity', 'Topic.PartitionConfig.Capacity', ([], {'publish_mib_per_sec': '(4)', 'subscribe_mib_per_sec': '(8)'}), '(publish_mib_per_sec=4, subscribe_mib_per_sec=8)\n', (1855, 1903), False, 'from google.cloud.pubsublite import AdminClient, Topic\n'), ((2664, 2698), 'google.protobuf.duration_pb2.Duration', 'Duration', ([], {'seconds': '(60 * 60 * 24 * 7)'}), '(seconds=60 * 60 * 24 * 7)\n', (2672, 2698), False, 'from google.protobuf.duration_pb2 import Duration\n')] |
import logging
logging.basicConfig(filename="./log.data", datefmt="%y/%m/%d",
format="%(levelname)s: %(asctime)s : %(message)s")
logging.info('Info only')
logging.error('Oops, thats not good')
logging.critical("Didn't think so")
| [
"logging.basicConfig",
"logging.info",
"logging.error",
"logging.critical"
] | [((15, 133), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""./log.data"""', 'datefmt': '"""%y/%m/%d"""', 'format': '"""%(levelname)s: %(asctime)s : %(message)s"""'}), "(filename='./log.data', datefmt='%y/%m/%d', format=\n '%(levelname)s: %(asctime)s : %(message)s')\n", (34, 133), False, 'import logging\n'), ((149, 174), 'logging.info', 'logging.info', (['"""Info only"""'], {}), "('Info only')\n", (161, 174), False, 'import logging\n'), ((175, 212), 'logging.error', 'logging.error', (['"""Oops, thats not good"""'], {}), "('Oops, thats not good')\n", (188, 212), False, 'import logging\n'), ((213, 248), 'logging.critical', 'logging.critical', (['"""Didn\'t think so"""'], {}), '("Didn\'t think so")\n', (229, 248), False, 'import logging\n')] |
import numpy as np
import mxnet as mx
import mxnet.ndarray as nd
from utils.profiler_utils import timer
@timer
def nd_forward_backward_and_time(F, runs, *args, **kwargs):
"""Helper function to run a given NDArray operator (F) for 'runs' number of times with
given args and kwargs. Executes both forward and backward pass.
NOTE: This is a sync call and waits for all the operations execution to complete.
:param F: NDArray operator (Function reference) to execute. Example: mx.nd.add
:param runs: Number of times to execute the operation
:param args: Arguments for the NDArray operator (F) being executed.
:param kwargs: Key value arguments for the NDArray operator (F) being executed.
:return: Tuple of (Total execution time in seconds, any results from NDArray operation execution)
"""
for _ in range(runs):
with mx.autograd.record():
res = F(*args, **kwargs)
res.backward()
nd.waitall()
@timer
def nd_forward_and_time(F, runs, *args, **kwargs):
"""Helper function to run a given NDArray operator (F) for 'runs' number of times with
given args and kwargs. Executes ONLY forward pass.
NOTE: This is a sync call and waits for all the operations execution to complete.
    :param F: NDArray operator (Function reference) to execute. Example: mx.nd.add
    :param runs: Number of times to execute the operation
:param args: Arguments for the NDArray operator (F) being executed.
:param kwargs: Key value arguments for the NDArray operator (F) being executed.
:return: Tuple(Total execution time in seconds, any results from NDArray operation execution)
"""
for _ in range(runs):
F(*args, **kwargs)
nd.waitall()
def get_mx_ndarray(ctx, in_tensor, dtype, initializer, attach_grad=True):
"""Helper function to prepare a MXNet NDArray tensor in given Context (ctx) of type (dtype) with given
initializer. You can get a new Tensor by providing only "Shape" or "Numpy NDArray" or another MXNet NDArray as
"in_tensor".
NOTE: This is a sync call and waits for the Tensor to be created.
:param ctx: Context of the new MXNet NDArray Tensor.
:param in_tensor: Can be a tuple of shape or Numpy NDArray or MXNet NDArray.
:param dtype: Precision or Dtype of the expected Tensor. Ex: "float32", "Int64"
:param initializer: Function reference to the initialize to use. Ex: mx.nd.random.normal, mx.nd.zeros
:param attach_grad: To attach a gradient for the Tensor. Default is True.
:return: MXNet NDArray Tensor.
"""
if isinstance(in_tensor, tuple):
tensor = initializer(ctx=ctx, shape=in_tensor, dtype=dtype)
elif isinstance(in_tensor, np.ndarray):
tensor = nd.array(in_tensor, ctx=ctx, dtype=dtype)
    elif isinstance(in_tensor, nd.NDArray):
tensor = in_tensor.as_in_context(ctx=ctx).astype(dtype=dtype)
else:
raise ValueError("Invalid input type for creating input tensor. Input can be tuple() of shape or Numpy Array or"
" MXNet NDArray. Given - ", in_tensor)
if attach_grad:
tensor.attach_grad()
tensor.wait_to_read()
return tensor
| [
"mxnet.autograd.record",
"mxnet.ndarray.waitall",
"mxnet.ndarray.array"
] | [((957, 969), 'mxnet.ndarray.waitall', 'nd.waitall', ([], {}), '()\n', (967, 969), True, 'import mxnet.ndarray as nd\n'), ((1727, 1739), 'mxnet.ndarray.waitall', 'nd.waitall', ([], {}), '()\n', (1737, 1739), True, 'import mxnet.ndarray as nd\n'), ((867, 887), 'mxnet.autograd.record', 'mx.autograd.record', ([], {}), '()\n', (885, 887), True, 'import mxnet as mx\n'), ((2742, 2783), 'mxnet.ndarray.array', 'nd.array', (['in_tensor'], {'ctx': 'ctx', 'dtype': 'dtype'}), '(in_tensor, ctx=ctx, dtype=dtype)\n', (2750, 2783), True, 'import mxnet.ndarray as nd\n')] |
NUMBER_THRESHOLD = 256
import numpy as np
import cv2
EPS = 0.00000000000000001
def evaluate_(resS, gt, precision, recall, tpr, fpr):
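    # Sweep all 256 binarization thresholds over the saliency map resS, accumulating
    # precision/recall and TPR/FPR against the ground-truth mask gt, then return the MAE.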
gtFM = gt
gtFM = cv2.compare(gtFM, 128, cv2.CMP_GT)
gtBM = cv2.bitwise_not(gtFM)
gtF = np.sum(gtFM)
gtB = resS.shape[0] * resS.shape[1] * 255 - gtF
mae = 0.
for i in range(NUMBER_THRESHOLD):
resM = np.zeros(resS.shape)
tpM = np.zeros(resS.shape)
fpM = np.zeros(resS.shape)
resM = cv2.compare(resS, i, cv2.CMP_GT)
tpM = cv2.bitwise_and(resM, gtFM)
fpM = cv2.bitwise_and(resM, gtBM)
tp = np.sum(tpM)
fp = np.sum(fpM)
recall[i] += tp / (gtF + EPS)
total = EPS + tp + fp
precision[i] += (tp + EPS) / total
tpr[i] += (tp + EPS) / (gtF + EPS)
fpr[i] += (fp + EPS) / (gtB + EPS)
    gtFM = np.divide(gtFM, 255.0)
    resS = np.divide(resS, 255.0)
resS = cv2.absdiff(gtFM, resS)
mae += np.sum(resS) / (gtFM.shape[0] * gtFM.shape[1])
print(mae)
return mae
def get_AUC(resS, gt):
precision = np.zeros((NUMBER_THRESHOLD, 1))
recall = np.zeros((NUMBER_THRESHOLD, 1))
tpr = np.zeros((NUMBER_THRESHOLD, 1))
fpr = np.zeros((NUMBER_THRESHOLD, 1))
mea = evaluate_(resS, gt, precision, recall, tpr, fpr)
print(recall)
areaROC = 0.
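    # Approximate the area under the ROC curve with the trapezoidal rule over (fpr, tpr)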
for i in range(NUMBER_THRESHOLD):
areaROC += (tpr[i] + tpr[i - 1]) * (fpr[i - 1] - fpr[i]) / 2.0
print(areaROC)
return areaROC
if __name__ == "__main__":
test2_path = "1036.png"
test1_path = "temp.jpg"
test1 = cv2.imread(test1_path)
test2 = cv2.imread(test2_path)
# gray1 = cv2.cvtColor(test1, cv2.COLOR_BGR2GRAY)
# gray2 = cv2.cvtColor(test2, cv2.COLOR_BGR2GRAY)
np.set_printoptions(threshold=np.inf)
get_AUC(test1[:,:,0], test2[:,:,0]) | [
"cv2.compare",
"numpy.divide",
"cv2.bitwise_and",
"numpy.sum",
"numpy.zeros",
"cv2.bitwise_not",
"cv2.imread",
"cv2.absdiff",
"numpy.set_printoptions"
] | [((160, 194), 'cv2.compare', 'cv2.compare', (['gtFM', '(128)', 'cv2.CMP_GT'], {}), '(gtFM, 128, cv2.CMP_GT)\n', (171, 194), False, 'import cv2\n'), ((206, 227), 'cv2.bitwise_not', 'cv2.bitwise_not', (['gtFM'], {}), '(gtFM)\n', (221, 227), False, 'import cv2\n'), ((238, 250), 'numpy.sum', 'np.sum', (['gtFM'], {}), '(gtFM)\n', (244, 250), True, 'import numpy as np\n'), ((844, 866), 'numpy.divide', 'np.divide', (['gtFM', '(255.0)'], {}), '(gtFM, 255.0)\n', (853, 866), True, 'import numpy as np\n'), ((871, 893), 'numpy.divide', 'np.divide', (['resS', '(255.0)'], {}), '(resS, 255.0)\n', (880, 893), True, 'import numpy as np\n'), ((905, 928), 'cv2.absdiff', 'cv2.absdiff', (['gtFM', 'resS'], {}), '(gtFM, resS)\n', (916, 928), False, 'import cv2\n'), ((1057, 1088), 'numpy.zeros', 'np.zeros', (['(NUMBER_THRESHOLD, 1)'], {}), '((NUMBER_THRESHOLD, 1))\n', (1065, 1088), True, 'import numpy as np\n'), ((1102, 1133), 'numpy.zeros', 'np.zeros', (['(NUMBER_THRESHOLD, 1)'], {}), '((NUMBER_THRESHOLD, 1))\n', (1110, 1133), True, 'import numpy as np\n'), ((1144, 1175), 'numpy.zeros', 'np.zeros', (['(NUMBER_THRESHOLD, 1)'], {}), '((NUMBER_THRESHOLD, 1))\n', (1152, 1175), True, 'import numpy as np\n'), ((1186, 1217), 'numpy.zeros', 'np.zeros', (['(NUMBER_THRESHOLD, 1)'], {}), '((NUMBER_THRESHOLD, 1))\n', (1194, 1217), True, 'import numpy as np\n'), ((1555, 1577), 'cv2.imread', 'cv2.imread', (['test1_path'], {}), '(test1_path)\n', (1565, 1577), False, 'import cv2\n'), ((1590, 1612), 'cv2.imread', 'cv2.imread', (['test2_path'], {}), '(test2_path)\n', (1600, 1612), False, 'import cv2\n'), ((1725, 1762), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (1744, 1762), True, 'import numpy as np\n'), ((369, 389), 'numpy.zeros', 'np.zeros', (['resS.shape'], {}), '(resS.shape)\n', (377, 389), True, 'import numpy as np\n'), ((404, 424), 'numpy.zeros', 'np.zeros', (['resS.shape'], {}), '(resS.shape)\n', (412, 424), True, 'import numpy as np\n'), ((439, 459), 'numpy.zeros', 'np.zeros', (['resS.shape'], {}), '(resS.shape)\n', (447, 459), True, 'import numpy as np\n'), ((475, 507), 'cv2.compare', 'cv2.compare', (['resS', 'i', 'cv2.CMP_GT'], {}), '(resS, i, cv2.CMP_GT)\n', (486, 507), False, 'import cv2\n'), ((522, 549), 'cv2.bitwise_and', 'cv2.bitwise_and', (['resM', 'gtFM'], {}), '(resM, gtFM)\n', (537, 549), False, 'import cv2\n'), ((564, 591), 'cv2.bitwise_and', 'cv2.bitwise_and', (['resM', 'gtBM'], {}), '(resM, gtBM)\n', (579, 591), False, 'import cv2\n'), ((605, 616), 'numpy.sum', 'np.sum', (['tpM'], {}), '(tpM)\n', (611, 616), True, 'import numpy as np\n'), ((630, 641), 'numpy.sum', 'np.sum', (['fpM'], {}), '(fpM)\n', (636, 641), True, 'import numpy as np\n'), ((940, 952), 'numpy.sum', 'np.sum', (['resS'], {}), '(resS)\n', (946, 952), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from scipy import stats, integrate
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
df = pd.read_csv('cn.csv')
sns.pairplot(df);
plt.show()
| [
"seaborn.set",
"seaborn.pairplot",
"pandas.read_csv",
"matplotlib.pyplot.show"
] | [((128, 153), 'seaborn.set', 'sns.set', ([], {'color_codes': '(True)'}), '(color_codes=True)\n', (135, 153), True, 'import seaborn as sns\n'), ((159, 180), 'pandas.read_csv', 'pd.read_csv', (['"""cn.csv"""'], {}), "('cn.csv')\n", (170, 180), True, 'import pandas as pd\n'), ((181, 197), 'seaborn.pairplot', 'sns.pairplot', (['df'], {}), '(df)\n', (193, 197), True, 'import seaborn as sns\n'), ((199, 209), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (207, 209), True, 'import matplotlib.pyplot as plt\n')] |
from matplotlib import pyplot
from collections import Counter
grades = [83, 95, 91, 87, 70, 0, 85, 82, 100, 67, 73, 77, 0]
decile = lambda grade: grade // 10 * 10
histogram = Counter(decile(grade) for grade in grades)
pyplot.bar([x for x in histogram.keys()], histogram.values(), 8)
pyplot.axis([-5, 105, 0, 5])
pyplot.xticks([10 * i for i in range(11)])
pyplot.xlabel("Decil")
pyplot.ylabel("# de Alunos")
pyplot.title("Distribuição das Notas do Teste 1")
pyplot.show() | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show"
] | [((287, 315), 'matplotlib.pyplot.axis', 'pyplot.axis', (['[-5, 105, 0, 5]'], {}), '([-5, 105, 0, 5])\n', (298, 315), False, 'from matplotlib import pyplot\n'), ((359, 381), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Decil"""'], {}), "('Decil')\n", (372, 381), False, 'from matplotlib import pyplot\n'), ((382, 410), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""# de Alunos"""'], {}), "('# de Alunos')\n", (395, 410), False, 'from matplotlib import pyplot\n'), ((411, 460), 'matplotlib.pyplot.title', 'pyplot.title', (['"""Distribuição das Notas do Teste 1"""'], {}), "('Distribuição das Notas do Teste 1')\n", (423, 460), False, 'from matplotlib import pyplot\n'), ((462, 475), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (473, 475), False, 'from matplotlib import pyplot\n')] |
from django.db import models
from django.utils import timezone
from cloudinary.models import CloudinaryField
from django.contrib.auth.models import User
# Create your models here.
class Profile(models.Model):
user= models.OneToOneField(User, on_delete=models.CASCADE, blank=True)
profile_pic = models.ImageField(blank=True,null=True)
bio = models.TextField(blank=True, null=True)
# title field
# title = models.CharField(max_length=100)
#image field
# image = CloudinaryField('image')
class Post(models.Model):
author = models.ForeignKey(User,on_delete=models.CASCADE)
image = models.ImageField(blank=True,null=True)
caption = models.TextField(blank=True, null=True)
Created_date = models.DateField(default=timezone.now)
comment = models.TextField(blank=True, null=True)
like = models.ManyToManyField(User, blank=True,related_name='likes')
def __str__(self) -> str:
return self.caption
| [
"django.db.models.OneToOneField",
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.ImageField"
] | [((221, 285), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE', 'blank': '(True)'}), '(User, on_delete=models.CASCADE, blank=True)\n', (241, 285), False, 'from django.db import models\n'), ((304, 344), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (321, 344), False, 'from django.db import models\n'), ((354, 393), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (370, 393), False, 'from django.db import models\n'), ((565, 614), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (582, 614), False, 'from django.db import models\n'), ((630, 670), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (647, 670), False, 'from django.db import models\n'), ((688, 727), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (704, 727), False, 'from django.db import models\n'), ((751, 789), 'django.db.models.DateField', 'models.DateField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (767, 789), False, 'from django.db import models\n'), ((808, 847), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (824, 847), False, 'from django.db import models\n'), ((863, 925), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['User'], {'blank': '(True)', 'related_name': '"""likes"""'}), "(User, blank=True, related_name='likes')\n", (885, 925), False, 'from django.db import models\n')] |
import numpy as np
import pytest
from tf_fastmri_data.datasets.noisy import NoisyFastMRIDatasetBuilder, ComplexNoisyFastMRIDatasetBuilder
kspace_shape = [2, 640, 322, 1]
file_contrast = 'CORPD_FBK'
@pytest.mark.parametrize('contrast', [None, file_contrast])
@pytest.mark.parametrize('slice_random', [True, False])
@pytest.mark.parametrize('noise_input', [True, False])
@pytest.mark.parametrize('noise_power', [30, (0, 50)])
@pytest.mark.parametrize('noise_mode', ['uniform', 'gaussian'])
@pytest.mark.parametrize('batch_size', [None, 2])
def test_noisy_dataset_train(create_full_fastmri_test_tmp_dataset, contrast, slice_random, noise_input, noise_power, noise_mode, batch_size):
if not (noise_mode == 'gaussian' and isinstance(noise_power, tuple)) and not (batch_size and not slice_random):
path = create_full_fastmri_test_tmp_dataset['fastmri_tmp_singlecoil_train']
ds = NoisyFastMRIDatasetBuilder(
path=path,
contrast=contrast,
slice_random=slice_random,
noise_input=noise_input,
noise_power_spec=noise_power,
noise_mode=noise_mode,
batch_size=batch_size,
)
if noise_input:
(image_noisy, *_others), model_outputs = next(ds.preprocessed_ds.as_numpy_iterator())
else:
image_noisy, model_outputs = next(ds.preprocessed_ds.as_numpy_iterator())
np.testing.assert_equal(model_outputs.shape[-3:], [320, 320, 1])
np.testing.assert_equal(image_noisy.shape[-3:], [320, 320, 1])
np.testing.assert_equal(image_noisy.ndim, 4)
np.testing.assert_equal(model_outputs.ndim, 4)
@pytest.mark.parametrize('contrast', [None, file_contrast])
@pytest.mark.parametrize('slice_random', [True, False])
@pytest.mark.parametrize('noise_input', [True, False])
@pytest.mark.parametrize('noise_power', [30, (0, 50)])
@pytest.mark.parametrize('noise_mode', ['uniform', 'gaussian'])
@pytest.mark.parametrize('batch_size', [None, 2])
def test_complex_noisy_dataset_train(create_full_fastmri_test_tmp_dataset, contrast, slice_random, noise_input, noise_power, noise_mode, batch_size):
if not (noise_mode == 'gaussian' and isinstance(noise_power, tuple)) and not (batch_size and not slice_random):
path = create_full_fastmri_test_tmp_dataset['fastmri_tmp_singlecoil_train']
ds = ComplexNoisyFastMRIDatasetBuilder(
path=path,
contrast=contrast,
slice_random=slice_random,
noise_input=noise_input,
noise_power_spec=noise_power,
noise_mode=noise_mode,
batch_size=batch_size,
)
if noise_input:
(image_noisy, *_others), model_outputs = next(ds.preprocessed_ds.as_numpy_iterator())
else:
image_noisy, model_outputs = next(ds.preprocessed_ds.as_numpy_iterator())
if not (batch_size == 2 and slice_random):
# NOTE: for now complex images can only be of size 320 x 320
np.testing.assert_equal(model_outputs.shape[-3:], (320, 320, 1))
np.testing.assert_equal(image_noisy.shape[-3:], (320, 320, 1))
else:
assert model_outputs.shape[-2] == 320
assert image_noisy.shape[-2] == 320
np.testing.assert_equal(image_noisy.ndim, 4)
assert image_noisy.dtype == np.complex64
np.testing.assert_equal(model_outputs.ndim, 4)
| [
"pytest.mark.parametrize",
"tf_fastmri_data.datasets.noisy.NoisyFastMRIDatasetBuilder",
"tf_fastmri_data.datasets.noisy.ComplexNoisyFastMRIDatasetBuilder",
"numpy.testing.assert_equal"
] | [((203, 261), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""contrast"""', '[None, file_contrast]'], {}), "('contrast', [None, file_contrast])\n", (226, 261), False, 'import pytest\n'), ((263, 317), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""slice_random"""', '[True, False]'], {}), "('slice_random', [True, False])\n", (286, 317), False, 'import pytest\n'), ((319, 372), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""noise_input"""', '[True, False]'], {}), "('noise_input', [True, False])\n", (342, 372), False, 'import pytest\n'), ((374, 427), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""noise_power"""', '[30, (0, 50)]'], {}), "('noise_power', [30, (0, 50)])\n", (397, 427), False, 'import pytest\n'), ((429, 491), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""noise_mode"""', "['uniform', 'gaussian']"], {}), "('noise_mode', ['uniform', 'gaussian'])\n", (452, 491), False, 'import pytest\n'), ((493, 541), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch_size"""', '[None, 2]'], {}), "('batch_size', [None, 2])\n", (516, 541), False, 'import pytest\n'), ((1653, 1711), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""contrast"""', '[None, file_contrast]'], {}), "('contrast', [None, file_contrast])\n", (1676, 1711), False, 'import pytest\n'), ((1713, 1767), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""slice_random"""', '[True, False]'], {}), "('slice_random', [True, False])\n", (1736, 1767), False, 'import pytest\n'), ((1769, 1822), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""noise_input"""', '[True, False]'], {}), "('noise_input', [True, False])\n", (1792, 1822), False, 'import pytest\n'), ((1824, 1877), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""noise_power"""', '[30, (0, 50)]'], {}), "('noise_power', [30, (0, 50)])\n", (1847, 1877), False, 'import pytest\n'), ((1879, 1941), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""noise_mode"""', "['uniform', 'gaussian']"], {}), "('noise_mode', ['uniform', 'gaussian'])\n", (1902, 1941), False, 'import pytest\n'), ((1943, 1991), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch_size"""', '[None, 2]'], {}), "('batch_size', [None, 2])\n", (1966, 1991), False, 'import pytest\n'), ((897, 1090), 'tf_fastmri_data.datasets.noisy.NoisyFastMRIDatasetBuilder', 'NoisyFastMRIDatasetBuilder', ([], {'path': 'path', 'contrast': 'contrast', 'slice_random': 'slice_random', 'noise_input': 'noise_input', 'noise_power_spec': 'noise_power', 'noise_mode': 'noise_mode', 'batch_size': 'batch_size'}), '(path=path, contrast=contrast, slice_random=\n slice_random, noise_input=noise_input, noise_power_spec=noise_power,\n noise_mode=noise_mode, batch_size=batch_size)\n', (923, 1090), False, 'from tf_fastmri_data.datasets.noisy import NoisyFastMRIDatasetBuilder, ComplexNoisyFastMRIDatasetBuilder\n'), ((1407, 1471), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['model_outputs.shape[-3:]', '[320, 320, 1]'], {}), '(model_outputs.shape[-3:], [320, 320, 1])\n', (1430, 1471), True, 'import numpy as np\n'), ((1480, 1542), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['image_noisy.shape[-3:]', '[320, 320, 1]'], {}), '(image_noisy.shape[-3:], [320, 320, 1])\n', (1503, 1542), True, 'import numpy as np\n'), ((1551, 1595), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['image_noisy.ndim', '(4)'], {}), '(image_noisy.ndim, 4)\n', (1574, 1595), True, 'import numpy as np\n'), ((1604, 1650), 
'numpy.testing.assert_equal', 'np.testing.assert_equal', (['model_outputs.ndim', '(4)'], {}), '(model_outputs.ndim, 4)\n', (1627, 1650), True, 'import numpy as np\n'), ((2355, 2555), 'tf_fastmri_data.datasets.noisy.ComplexNoisyFastMRIDatasetBuilder', 'ComplexNoisyFastMRIDatasetBuilder', ([], {'path': 'path', 'contrast': 'contrast', 'slice_random': 'slice_random', 'noise_input': 'noise_input', 'noise_power_spec': 'noise_power', 'noise_mode': 'noise_mode', 'batch_size': 'batch_size'}), '(path=path, contrast=contrast,\n slice_random=slice_random, noise_input=noise_input, noise_power_spec=\n noise_power, noise_mode=noise_mode, batch_size=batch_size)\n', (2388, 2555), False, 'from tf_fastmri_data.datasets.noisy import NoisyFastMRIDatasetBuilder, ComplexNoisyFastMRIDatasetBuilder\n'), ((3260, 3304), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['image_noisy.ndim', '(4)'], {}), '(image_noisy.ndim, 4)\n', (3283, 3304), True, 'import numpy as np\n'), ((3362, 3408), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['model_outputs.ndim', '(4)'], {}), '(model_outputs.ndim, 4)\n', (3385, 3408), True, 'import numpy as np\n'), ((3000, 3064), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['model_outputs.shape[-3:]', '(320, 320, 1)'], {}), '(model_outputs.shape[-3:], (320, 320, 1))\n', (3023, 3064), True, 'import numpy as np\n'), ((3077, 3139), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['image_noisy.shape[-3:]', '(320, 320, 1)'], {}), '(image_noisy.shape[-3:], (320, 320, 1))\n', (3100, 3139), True, 'import numpy as np\n')] |
# Carl is free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.datasets import make_moons
from sklearn.tree import DecisionTreeRegressor
from carl.learning import as_classifier
def test_as_classifier():
X, y = make_moons(n_samples=100, random_state=1)
y = 2 * y - 1 # use -1/+1 labels
clf = as_classifier(DecisionTreeRegressor())
clf.fit(X, y)
probas = clf.predict_proba(X)
predictions = clf.predict(X)
assert_array_equal(probas.shape, (len(X), 2))
assert_array_equal(predictions, y)
y[-1] = 2
clf = as_classifier(DecisionTreeRegressor())
assert_raises(ValueError, clf.fit, X, y)
| [
"sklearn.datasets.make_moons",
"numpy.testing.assert_raises",
"sklearn.tree.DecisionTreeRegressor",
"numpy.testing.assert_array_equal"
] | [((403, 444), 'sklearn.datasets.make_moons', 'make_moons', ([], {'n_samples': '(100)', 'random_state': '(1)'}), '(n_samples=100, random_state=1)\n', (413, 444), False, 'from sklearn.datasets import make_moons\n'), ((673, 707), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['predictions', 'y'], {}), '(predictions, y)\n', (691, 707), False, 'from numpy.testing import assert_array_equal\n'), ((776, 816), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'clf.fit', 'X', 'y'], {}), '(ValueError, clf.fit, X, y)\n', (789, 816), False, 'from numpy.testing import assert_raises\n'), ((508, 531), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (529, 531), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((747, 770), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (768, 770), False, 'from sklearn.tree import DecisionTreeRegressor\n')] |
from frolic import Vector2
import colorama
GREEN = colorama.Fore.GREEN
RED = colorama.Fore.RED
BRIGHT = colorama.Style.BRIGHT
RESET_ALL = colorama.Style.RESET_ALL
print('\nVector2 Test\n')
try:
v1 = Vector2(2, 4)
v2 = Vector2(x=3, y=5)
v3 = Vector2(y=4, x=6)
test = 'v1.x == 2'
assert(eval(test))
test = 'v1.y == 4'
assert(eval(test))
test = 'v2.x == 3'
assert(eval(test))
test = 'v2.y == 5'
assert(eval(test))
test = 'v3.x == 6'
assert(eval(test))
test = 'v3.y == 4'
assert(eval(test))
print(f'{GREEN}{BRIGHT}PASSED:{RESET_ALL} assignment')
except:
print(f'{RED}{BRIGHT}FAILED:{RESET_ALL} assignment "{test}"')
try:
v1 = Vector2(x=3, y=4)
v2 = v1.add(Vector2(x=2, y=3))
test = 'v2.x == 5'
assert(eval(test))
test = 'v2.y == 7'
assert(eval(test))
print(f'{GREEN}{BRIGHT}PASSED:{RESET_ALL} add')
except:
print(f'{RED}{BRIGHT}FAILED:{RESET_ALL} add "{test}"')
try:
v1 = Vector2(x=3, y=4)
v2 = v1.subtract(Vector2(x=2, y=1))
test = 'v2.x == 1'
assert(eval(test))
test = 'v2.y == 3'
assert(eval(test))
print(f'{GREEN}{BRIGHT}PASSED:{RESET_ALL} subtract')
except:
print(f'{RED}{BRIGHT}FAILED:{RESET_ALL} subtract "{test}"')
try:
v1 = Vector2(x=3, y=4).scale(3)
test = 'v1.x == 9'
assert(eval(test))
test = 'v1.y == 12'
assert(eval(test))
print(f'{GREEN}{BRIGHT}PASSED:{RESET_ALL} scale')
except:
print(f'{RED}{BRIGHT}FAILED:{RESET_ALL} scale "{test}"')
try:
v1 = Vector2(5, 10.2).dot(Vector2(3, 2))
v2 = Vector2(2, 3).dot(Vector2(-3, -5))
v3 = Vector2(4, 4).dot(Vector2(-1, 1))
v4 = Vector2(1.1, 2.2).dot(Vector2(3.3, 4.4))
test = 'type(v1) is float'
assert(eval(test))
test = 'type(v2) is int'
assert(eval(test))
test = 'type(v3) is int'
assert(eval(test))
test = 'type(v4) is float'
assert(eval(test))
test = 'v1 == 35.4'
assert(eval(test))
test = 'v2 == -21'
assert(eval(test))
test = 'v3 == 0'
assert(eval(test))
# floating point error makes v4 == 13.310000000000002 on 64 bit windows
# Wolfram Alpha says it is exactly 13.31
# https://www.wolframalpha.com/input/?
# i=dot+product+calculator
# &assumption={"F","DotProduct","dotVector1"}->"{1.1,2.2}"
# &assumption={"F","DotProduct","dotVector2"}->"{3.3,4.4}"
test = '13.310000 < v4 < 13.3100001'
assert(eval(test))
print(f'{GREEN}{BRIGHT}PASSED:{RESET_ALL} dot')
except:
print(f'{RED}{BRIGHT}FAILED:{RESET_ALL} dot "{test}"')
try:
v1 = Vector2(x=3, y=4).magnitude()
test = 'v1 == 5'
assert(eval(test))
print(f'{GREEN}{BRIGHT}PASSED:{RESET_ALL} magnitude')
except:
print(f'{RED}{BRIGHT}FAILED:{RESET_ALL} magnitude "{test}"')
try:
v1 = Vector2(x=5, y=10)
v2 = Vector2(x=5, y=10)
test = 'v1 == v2'
assert(eval(test))
test = 'v1 is not v2'
assert(eval(test))
print(f'{GREEN}{BRIGHT}PASSED:{RESET_ALL} equality check')
except:
print(f'{RED}{BRIGHT}FAILED:{RESET_ALL} equality check "{test}"')
try:
v1 = Vector2(x=5, y=10)
v2 = v1.clone()
test = 'v1 == v2'
assert(eval(test))
test = 'v1 is not v2'
assert(eval(test))
print(f'{GREEN}{BRIGHT}PASSED:{RESET_ALL} clone')
except:
print(f'{RED}{BRIGHT}FAILED:{RESET_ALL} clone "{test}"')
print('')
| [
"frolic.Vector2"
] | [((205, 218), 'frolic.Vector2', 'Vector2', (['(2)', '(4)'], {}), '(2, 4)\n', (212, 218), False, 'from frolic import Vector2\n'), ((228, 245), 'frolic.Vector2', 'Vector2', ([], {'x': '(3)', 'y': '(5)'}), '(x=3, y=5)\n', (235, 245), False, 'from frolic import Vector2\n'), ((255, 272), 'frolic.Vector2', 'Vector2', ([], {'y': '(4)', 'x': '(6)'}), '(y=4, x=6)\n', (262, 272), False, 'from frolic import Vector2\n'), ((698, 715), 'frolic.Vector2', 'Vector2', ([], {'x': '(3)', 'y': '(4)'}), '(x=3, y=4)\n', (705, 715), False, 'from frolic import Vector2\n'), ((978, 995), 'frolic.Vector2', 'Vector2', ([], {'x': '(3)', 'y': '(4)'}), '(x=3, y=4)\n', (985, 995), False, 'from frolic import Vector2\n'), ((2801, 2819), 'frolic.Vector2', 'Vector2', ([], {'x': '(5)', 'y': '(10)'}), '(x=5, y=10)\n', (2808, 2819), False, 'from frolic import Vector2\n'), ((2829, 2847), 'frolic.Vector2', 'Vector2', ([], {'x': '(5)', 'y': '(10)'}), '(x=5, y=10)\n', (2836, 2847), False, 'from frolic import Vector2\n'), ((3099, 3117), 'frolic.Vector2', 'Vector2', ([], {'x': '(5)', 'y': '(10)'}), '(x=5, y=10)\n', (3106, 3117), False, 'from frolic import Vector2\n'), ((732, 749), 'frolic.Vector2', 'Vector2', ([], {'x': '(2)', 'y': '(3)'}), '(x=2, y=3)\n', (739, 749), False, 'from frolic import Vector2\n'), ((1017, 1034), 'frolic.Vector2', 'Vector2', ([], {'x': '(2)', 'y': '(1)'}), '(x=2, y=1)\n', (1024, 1034), False, 'from frolic import Vector2\n'), ((1553, 1566), 'frolic.Vector2', 'Vector2', (['(3)', '(2)'], {}), '(3, 2)\n', (1560, 1566), False, 'from frolic import Vector2\n'), ((1595, 1610), 'frolic.Vector2', 'Vector2', (['(-3)', '(-5)'], {}), '(-3, -5)\n', (1602, 1610), False, 'from frolic import Vector2\n'), ((1639, 1653), 'frolic.Vector2', 'Vector2', (['(-1)', '(1)'], {}), '(-1, 1)\n', (1646, 1653), False, 'from frolic import Vector2\n'), ((1686, 1703), 'frolic.Vector2', 'Vector2', (['(3.3)', '(4.4)'], {}), '(3.3, 4.4)\n', (1693, 1703), False, 'from frolic import Vector2\n'), ((1273, 1290), 'frolic.Vector2', 'Vector2', ([], {'x': '(3)', 'y': '(4)'}), '(x=3, y=4)\n', (1280, 1290), False, 'from frolic import Vector2\n'), ((1532, 1548), 'frolic.Vector2', 'Vector2', (['(5)', '(10.2)'], {}), '(5, 10.2)\n', (1539, 1548), False, 'from frolic import Vector2\n'), ((1577, 1590), 'frolic.Vector2', 'Vector2', (['(2)', '(3)'], {}), '(2, 3)\n', (1584, 1590), False, 'from frolic import Vector2\n'), ((1621, 1634), 'frolic.Vector2', 'Vector2', (['(4)', '(4)'], {}), '(4, 4)\n', (1628, 1634), False, 'from frolic import Vector2\n'), ((1664, 1681), 'frolic.Vector2', 'Vector2', (['(1.1)', '(2.2)'], {}), '(1.1, 2.2)\n', (1671, 1681), False, 'from frolic import Vector2\n'), ((2580, 2597), 'frolic.Vector2', 'Vector2', ([], {'x': '(3)', 'y': '(4)'}), '(x=3, y=4)\n', (2587, 2597), False, 'from frolic import Vector2\n')] |
'''
This is a sample Lambda function that sends an SMS on click of a
button. It needs one permission sns:Publish. The following policy
allows SNS publish to SMS but not topics or endpoints.
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sns:Publish"
],
"Resource": [
"*"
]
},
{
"Effect": "Deny",
"Action": [
"sns:Publish"
],
"Resource": [
"arn:aws:sns:*:*:*"
]
}
]
}
The following JSON template shows what is sent as the payload:
{
"serialNumber": "GXXXXXXXXXXXXXXXXX",
"batteryVoltage": "xxmV",
"clickType": "SINGLE" | "DOUBLE" | "LONG"
}
A "LONG" clickType is sent if the first press lasts longer than 1.5 seconds.
"SINGLE" and "DOUBLE" clickType payloads are sent for short clicks.
For more documentation, follow the link below.
http://docs.aws.amazon.com/iot/latest/developerguide/iot-lambda-rule.html
'''
from __future__ import print_function
import boto3
import json
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
#phoneAngela = '18596281888'
phoneSam = '18597508490'
sns = boto3.client('sns')
phone_number = phoneSam
def lambda_handler(event, context):
logger.info('Received event: ' + json.dumps(event))
message = 'Henlo from your IoT Button %s. Here is the full event: %s' % (event['serialNumber'], json.dumps(event))
sns.publish(PhoneNumber=phone_number, Message=message)
logger.info('SMS has been sent to ' + phone_number) | [
"logging.getLogger",
"json.dumps",
"boto3.client"
] | [((1172, 1191), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1189, 1191), False, 'import logging\n'), ((1285, 1304), 'boto3.client', 'boto3.client', (['"""sns"""'], {}), "('sns')\n", (1297, 1304), False, 'import boto3\n'), ((1404, 1421), 'json.dumps', 'json.dumps', (['event'], {}), '(event)\n', (1414, 1421), False, 'import json\n'), ((1523, 1540), 'json.dumps', 'json.dumps', (['event'], {}), '(event)\n', (1533, 1540), False, 'import json\n')] |
"""
@author: <NAME>
@since: 21/08/2016
@modified:
How it works
Depth = go deeper = stack (LIFO)
- explore children as far as possible (keep going until you reach a dead end)
- update current_node with something that has a path you haven't visited yet and do that
1. Start with an empty Stack
2. Initialize each node to have infinity distance
3. Add root node to the stack
4. While nodes in stack
5. current_node = pop stack
    5.a if we haven't seen current_node before (distance="Infinity")
- 'visit' it (distance=0)
- add it to output
6. Loop for each destination node for each incident edge to current_node (for each adjacent node)
- put the adjacent node on the stack
repeat process
B
/
eg: A
\
S - C - D
    first push A; pop A and push its neighbours B and S; the most recently pushed neighbour is popped next,
    so the search runs down one branch (for example S, then C, then D) before backtracking to B
    note: if there is a tie when picking the next node, pick in alphabetical order
Time complexity: O(v + e)
- need to look at each node (v)
- for a given current node, we look at all adjacent edges (eventually look at all edges)
O(v + e) as we look at all edges and nodes
    O(e) will be at most O(v^2) and at least O(1)
note: used aggregate analysis as even though
loop for all v
loop for all e to v
could be assumed to be O(ve), we know that when we execute we will only look at
all edges in the graph overall and not at each iteration so O(v + e) but e can be at most v^2 leading to O(v^2)
Space complexity: O(v)
- output array
- stack
both are of the size of the number of nodes = O(2v) = O(v) for iterative version
recursive version can have worst case space O(height)
    Can be used to traverse trees or graphs
"""
from algorithms_datastructures.datastructures.linked_stack import LinkedStack
def depth_first_search_rec_driver(graph, root, use_rep=False):
nodes = graph.get_vertices()
for node in nodes:
node.distance = "Infinity"
output = []
depth_first_search_recursive(graph, root, output, use_rep)
return output
def depth_first_search_recursive(graph, node, output, use_rep=False):
node.distance = 0 # Mark node as discovered.
output.append(node.rep if use_rep else node.name)
    for edge in graph.get_adjacent_edges(node):  # Need to check if undirected.
destination_node = edge.destination
if destination_node.distance == "Infinity":
depth_first_search_recursive(graph, destination_node, output, use_rep)
def depth_first_search_iterative(graph, root, use_rep=False):
nodes = graph.get_vertices()
for node in nodes:
node.distance = "Infinity"
stack = LinkedStack()
stack.push(root)
output = []
while not stack.is_empty():
current_node = stack.pop() # update current node
if current_node.distance != 0: # current_node is not discovered
current_node.distance = 0 # we have now discovered the current_node
output.append(current_node.rep if use_rep else current_node.name)
edges_incident = graph.get_adjacent_edges(current_node)
for edge in edges_incident:
destination_node = edge.destination
stack.push(destination_node)
# Implicit else for a node we have already seen
print(output)
return output
# ---- Another implementation for practice -----
def dfs_2(graph, node, visited, output): # Output contains the nodes at the end in order of visitation.
visited[node.index] = 1
output.append(node)
for edge in graph.get_adjacent_edges(node): # Visit node depth first (go as far as possible) if not already visited.
if visited[edge.destination.index] != 1:
dfs_2(graph, edge.destination, visited, output)
def test_dictionary_adj_map_output():
for i in range(5):
from algorithms_datastructures.graphs.implementations.adjacency_map import AdjacencyMap
# example from https://en.wikipedia.org/wiki/Depth-first_search
# iterative version correct order: A, E, F, B, D, C, G
# recursive version correct order: A, B, D, F, E, C, G (went to D first instead of F)
# this is also a correct dfs ordering: A, B, F, E, D, C, G
# each of them follow what dfs does which is exploring as far as possible along each branch before back tracking
graph_map = AdjacencyMap()
# set up adj map graph
A = graph_map.add_vertex('A')
B = graph_map.add_vertex('B')
C = graph_map.add_vertex('C')
D = graph_map.add_vertex('D')
E = graph_map.add_vertex('E')
F = graph_map.add_vertex('F')
G = graph_map.add_vertex('G')
graph_map.add_edge(A, B)
graph_map.add_edge(A, C)
graph_map.add_edge(A, E)
graph_map.add_edge(B, D)
graph_map.add_edge(B, F)
graph_map.add_edge(C, G)
graph_map.add_edge(E, F)
source = A
dfs1 = depth_first_search_iterative(graph_map, source)
print("DFS on adj_map graph (iterative): " + str(dfs1))
# possible outputs:
# example graph based on https://en.wikipedia.org/wiki/Depth-first_search
        # DFS on adj_map graph (iterative): ['A', 'B', 'D', 'F', 'E', 'C', 'G'], recursive correct from wiki
        # DFS on adj_map graph (iterative): ['A', 'E', 'F', 'B', 'D', 'C', 'G'], iterative correct from wiki
# DFS on adj_map graph (iterative): ['A', 'B', 'F', 'E', 'D', 'C', 'G'], correct
# DFS on adj_map graph (iterative): ['A', 'C', 'G', 'E', 'F', 'B', 'D'], correct
# due to randomness with {} sometimes we get a different order using a adj map
if __name__ == "__main__":
from algorithms_datastructures.graphs.implementations.adjacency_map import AdjacencyMap
from algorithms_datastructures.graphs.implementations.adjacency_list import AdjacencyList
# recursive version correct order: A, B, D, F, E, C, G (went to D first instead of F)
# this is also a correct dfs ordering: A, B, F, E, D, C, G
# each of them follow what dfs does which is exploring as far as possible along each branch before back tracking
graph_map = AdjacencyMap()
# set up adj map graph
A = graph_map.add_vertex('A')
B = graph_map.add_vertex('B')
C = graph_map.add_vertex('C')
D = graph_map.add_vertex('D')
E = graph_map.add_vertex('E')
F = graph_map.add_vertex('F')
G = graph_map.add_vertex('G')
graph_map.add_edge(A, B)
graph_map.add_edge(A, C)
graph_map.add_edge(A, E)
graph_map.add_edge(B, D)
graph_map.add_edge(B, F)
graph_map.add_edge(C, G)
graph_map.add_edge(E, F)
source = A
dfs1_itr = depth_first_search_iterative(graph_map, source)
dfs1_rec = depth_first_search_rec_driver(graph_map, source)
print("DFS on adjacent map, sometimes different order (all correct, check against 4 solns in test()")
print("DFS on adj_map graph (iterative): " + str(dfs1_itr))
print("DFS on adj_map graph (recursive): " + str(dfs1_rec))
""" Note
    due to randomness with {} sometimes we get a different order using an adj map
however they are all correct orderings by picking arbitrary adjacent nodes
see the tests below
"""
print("Testing DSF on map graph.")
test_dictionary_adj_map_output()
print("\nTesting DFS on adjacent list")
graph_list = AdjacencyList(7)
# set up adj map graph, slightly different set up due to diff underlying structure
a = graph_list.add_vertex(0,'A')
b = graph_list.add_vertex(1,'B')
c = graph_list.add_vertex(2,'C')
d = graph_list.add_vertex(3,'D')
e = graph_list.add_vertex(4,'E')
f = graph_list.add_vertex(5,'F')
g = graph_list.add_vertex(6, 'G')
# Add edges both ways.
graph_list.add_edge(a, b)
graph_list.add_edge(a, c)
graph_list.add_edge(a, e)
graph_list.add_edge(b, f)
graph_list.add_edge(b, d)
graph_list.add_edge(c, g)
graph_list.add_edge(e, f)
graph_list.add_edge(b, a)
graph_list.add_edge(c, a)
graph_list.add_edge(e, a)
graph_list.add_edge(f, b)
graph_list.add_edge(d, b)
graph_list.add_edge(g, c)
graph_list.add_edge(f, e)
source2 = a
dfs2_itr = depth_first_search_iterative(graph_list, source2, use_rep=True)
print("DFS on adj_list graph (iterative): " + str(dfs2_itr))
dfs2_rec = depth_first_search_rec_driver(graph_list, source2, use_rep=True)
print("DFS on adj_list graph (recursive): " + str(dfs2_rec))
# ---- Another implementation for practice -----
visited = [0] * len(graph_list.get_all_vertices())
output = []
dfs2_rec2 = dfs_2(graph_list, source2, visited, output)
node_representations = [node.rep for node in output]
if node_representations == dfs2_rec:
print("Yay dfs worked")
else:
print("Node reps might be wrong: " + str(node_representations))
# All correct :) | [
"algorithms_datastructures.graphs.implementations.adjacency_list.AdjacencyList",
"algorithms_datastructures.datastructures.linked_stack.LinkedStack",
"algorithms_datastructures.graphs.implementations.adjacency_map.AdjacencyMap"
] | [((2914, 2927), 'algorithms_datastructures.datastructures.linked_stack.LinkedStack', 'LinkedStack', ([], {}), '()\n', (2925, 2927), False, 'from algorithms_datastructures.datastructures.linked_stack import LinkedStack\n'), ((6456, 6470), 'algorithms_datastructures.graphs.implementations.adjacency_map.AdjacencyMap', 'AdjacencyMap', ([], {}), '()\n', (6468, 6470), False, 'from algorithms_datastructures.graphs.implementations.adjacency_map import AdjacencyMap\n'), ((7659, 7675), 'algorithms_datastructures.graphs.implementations.adjacency_list.AdjacencyList', 'AdjacencyList', (['(7)'], {}), '(7)\n', (7672, 7675), False, 'from algorithms_datastructures.graphs.implementations.adjacency_list import AdjacencyList\n'), ((4673, 4687), 'algorithms_datastructures.graphs.implementations.adjacency_map.AdjacencyMap', 'AdjacencyMap', ([], {}), '()\n', (4685, 4687), False, 'from algorithms_datastructures.graphs.implementations.adjacency_map import AdjacencyMap\n')] |
import pytest
import pyroute2
import struct
from .namespaces import Namespace
def int_to_mac(c):
"""Turn an int into a MAC address."""
return ":".join(('{:02x}',)*6).format(
*struct.unpack('BBBBBB', c.to_bytes(6, byteorder='big')))
class LinksFactory(object):
"""A factory for veth pair of interfaces and other L2 stuff.
    Each veth interface will be named ethX, with X strictly
increasing at each call.
"""
def __init__(self):
# We create all those links in a dedicated namespace to avoid
# conflict with other namespaces.
self.ns = Namespace('net')
self.count = 0
def __call__(self, *args):
return self.veth(*args)
def veth(self, ns1, ns2):
"""Create a veth pair between two namespaces."""
with self.ns:
# First, create a link
first = 'eth{}'.format(self.count)
second = 'eth{}'.format(self.count + 1)
ipr = pyroute2.IPRoute()
ipr.link_create(ifname=first,
peer=second,
kind='veth')
idx = [ipr.link_lookup(ifname=x)[0]
for x in (first, second)]
# Set an easy to remember MAC address
ipr.link('set', index=idx[0],
address=int_to_mac(self.count + 1))
ipr.link('set', index=idx[1],
address=int_to_mac(self.count + 2))
# Then, move each to the target namespace
ipr.link('set', index=idx[0], net_ns_fd=ns1.fd('net'))
ipr.link('set', index=idx[1], net_ns_fd=ns2.fd('net'))
# And put them up
with ns1:
ipr = pyroute2.IPRoute()
ipr.link('set', index=idx[0], state='up')
with ns2:
ipr = pyroute2.IPRoute()
ipr.link('set', index=idx[1], state='up')
self.count += 2
def bridge(self, name, *ifaces):
"""Create a bridge."""
ipr = pyroute2.IPRoute()
# Create the bridge
ipr.link_create(ifname=name,
kind='bridge')
idx = ipr.link_lookup(ifname=name)[0]
# Attach interfaces
for iface in ifaces:
port = ipr.link_lookup(ifname=iface)[0]
ipr.link('set', index=port, master=idx)
# Put the bridge up
ipr.link('set', index=idx, state='up')
return idx
def bond(self, name, *ifaces):
"""Create a bond."""
ipr = pyroute2.IPRoute()
# Create the bond
ipr.link_create(ifname=name,
kind='bond')
idx = ipr.link_lookup(ifname=name)[0]
# Attach interfaces
for iface in ifaces:
slave = ipr.link_lookup(ifname=iface)[0]
ipr.link('set', index=slave, state='down')
ipr.link('set', index=slave, master=idx)
# Put the bond up
ipr.link('set', index=idx, state='up')
return idx
def vlan(self, name, id, iface):
"""Create a VLAN."""
ipr = pyroute2.IPRoute()
idx = ipr.link_lookup(ifname=iface)[0]
ipr.link_create(ifname=name,
kind='vlan',
vlan_id=id,
link=idx)
idx = ipr.link_lookup(ifname=name)[0]
ipr.link('set', index=idx, state='up')
return idx
def up(self, name):
ipr = pyroute2.IPRoute()
idx = ipr.link_lookup(ifname=name)[0]
ipr.link('set', index=idx, state='up')
def down(self, name):
ipr = pyroute2.IPRoute()
idx = ipr.link_lookup(ifname=name)[0]
ipr.link('set', index=idx, state='down')
def remove(self, name):
ipr = pyroute2.IPRoute()
idx = ipr.link_lookup(ifname=name)[0]
ipr.link_remove(idx)
@pytest.fixture
def links():
return LinksFactory()
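# Illustrative usage sketch (added for clarity; the ns1/ns2 namespace fixtures are hypothetical):
#   def test_bridge(links, ns1, ns2):
#       links(ns1, ns2)                 # veth pair: eth0 moves into ns1, eth1 into ns2
#       with ns1:
#           links.bridge('br0', 'eth0')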
| [
"pyroute2.IPRoute"
] | [((2025, 2043), 'pyroute2.IPRoute', 'pyroute2.IPRoute', ([], {}), '()\n', (2041, 2043), False, 'import pyroute2\n'), ((2528, 2546), 'pyroute2.IPRoute', 'pyroute2.IPRoute', ([], {}), '()\n', (2544, 2546), False, 'import pyroute2\n'), ((3084, 3102), 'pyroute2.IPRoute', 'pyroute2.IPRoute', ([], {}), '()\n', (3100, 3102), False, 'import pyroute2\n'), ((3445, 3463), 'pyroute2.IPRoute', 'pyroute2.IPRoute', ([], {}), '()\n', (3461, 3463), False, 'import pyroute2\n'), ((3598, 3616), 'pyroute2.IPRoute', 'pyroute2.IPRoute', ([], {}), '()\n', (3614, 3616), False, 'import pyroute2\n'), ((3755, 3773), 'pyroute2.IPRoute', 'pyroute2.IPRoute', ([], {}), '()\n', (3771, 3773), False, 'import pyroute2\n'), ((966, 984), 'pyroute2.IPRoute', 'pyroute2.IPRoute', ([], {}), '()\n', (982, 984), False, 'import pyroute2\n'), ((1715, 1733), 'pyroute2.IPRoute', 'pyroute2.IPRoute', ([], {}), '()\n', (1731, 1733), False, 'import pyroute2\n'), ((1836, 1854), 'pyroute2.IPRoute', 'pyroute2.IPRoute', ([], {}), '()\n', (1852, 1854), False, 'import pyroute2\n')] |
import contextlib
import os
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import SimplestChemenvStrategy
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import LocalGeometryFinder
from pymatgen.analysis.chemenv.coordination_environments.structure_environments import LightStructureEnvironments
from pymatgen.core.structure import Structure
# The computations have to be downloaded from zenodo.org as they are too large for a GitHub repository
current_path = os.getcwd()
directory_results = os.path.join(current_path, "../Results")
# First folder contains spin-polarized computation, second one the one without.
for directory in [
os.path.join(directory_results, "Yb14MnSb11/mp-568088/Spin_2/lobster_1"),
os.path.join(directory_results, "Yb14MnSb11/mp-568088/Spin_mixed/lobster_1")]:
for angle_cutoff in [0.3, 0.2]:
with contextlib.redirect_stdout(None):
with contextlib.redirect_stderr(None):
struct = Structure.from_file(os.path.join(directory, "POSCAR.gz"))
# get valences with Bond Valence analyser
ana = BVAnalyzer()
valences = ana.get_valences(struct)
# Setup the local geometry finder
lgf = LocalGeometryFinder()
lgf.setup_structure(structure=struct)
# Get the StructureEnvironments
se = lgf.compute_structure_environments(only_cations=True, valences=valences)
# compute light structure environments
strategy = SimplestChemenvStrategy(distance_cutoff=1.4, angle_cutoff=angle_cutoff)
lse = LightStructureEnvironments.from_structure_environments(strategy=strategy, structure_environments=se)
print(directory.split("/")[-2])
print("Angle Cutoff: " + str(angle_cutoff))
for ienv, env in enumerate(lse.coordination_environments):
if env is not None:
print(str(struct[ienv].specie) + str(ienv + 1) + ': ' + str(env[0]["ce_symbol"]))
print(" ")
| [
"contextlib.redirect_stdout",
"pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies.SimplestChemenvStrategy",
"os.path.join",
"os.getcwd",
"pymatgen.analysis.chemenv.coordination_environments.structure_environments.LightStructureEnvironments.from_structure_environments",
"contextlib.redirect_stderr",
"pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder.LocalGeometryFinder",
"pymatgen.analysis.bond_valence.BVAnalyzer"
] | [((578, 589), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (587, 589), False, 'import os\n'), ((610, 650), 'os.path.join', 'os.path.join', (['current_path', '"""../Results"""'], {}), "(current_path, '../Results')\n", (622, 650), False, 'import os\n'), ((837, 909), 'os.path.join', 'os.path.join', (['directory_results', '"""Yb14MnSb11/mp-568088/Spin_2/lobster_1"""'], {}), "(directory_results, 'Yb14MnSb11/mp-568088/Spin_2/lobster_1')\n", (849, 909), False, 'import os\n'), ((915, 991), 'os.path.join', 'os.path.join', (['directory_results', '"""Yb14MnSb11/mp-568088/Spin_mixed/lobster_1"""'], {}), "(directory_results, 'Yb14MnSb11/mp-568088/Spin_mixed/lobster_1')\n", (927, 991), False, 'import os\n'), ((1043, 1075), 'contextlib.redirect_stdout', 'contextlib.redirect_stdout', (['None'], {}), '(None)\n', (1069, 1075), False, 'import contextlib\n'), ((1284, 1296), 'pymatgen.analysis.bond_valence.BVAnalyzer', 'BVAnalyzer', ([], {}), '()\n', (1294, 1296), False, 'from pymatgen.analysis.bond_valence import BVAnalyzer\n'), ((1409, 1430), 'pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder.LocalGeometryFinder', 'LocalGeometryFinder', ([], {}), '()\n', (1428, 1430), False, 'from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import LocalGeometryFinder\n'), ((1691, 1762), 'pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies.SimplestChemenvStrategy', 'SimplestChemenvStrategy', ([], {'distance_cutoff': '(1.4)', 'angle_cutoff': 'angle_cutoff'}), '(distance_cutoff=1.4, angle_cutoff=angle_cutoff)\n', (1714, 1762), False, 'from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import SimplestChemenvStrategy\n'), ((1781, 1885), 'pymatgen.analysis.chemenv.coordination_environments.structure_environments.LightStructureEnvironments.from_structure_environments', 'LightStructureEnvironments.from_structure_environments', ([], {'strategy': 'strategy', 'structure_environments': 'se'}), '(strategy=strategy,\n structure_environments=se)\n', (1835, 1885), False, 'from pymatgen.analysis.chemenv.coordination_environments.structure_environments import LightStructureEnvironments\n'), ((1094, 1126), 'contextlib.redirect_stderr', 'contextlib.redirect_stderr', (['None'], {}), '(None)\n', (1120, 1126), False, 'import contextlib\n'), ((1173, 1209), 'os.path.join', 'os.path.join', (['directory', '"""POSCAR.gz"""'], {}), "(directory, 'POSCAR.gz')\n", (1185, 1209), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 12 13:50:06 2019
@author: KemyPeti
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import tensorflow as tf
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
if isinstance(value, six.string_types):
value = six.binary_type(value, encoding='utf-8')
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def AddFeatureToDict(feature_dict,
data_to_add_key,
data_to_add_value,
type_ = "int"):
'''
Args:
\n\t feature_dict : input feature dict
\n\t data_to_add_key : key of the data in dict
\n\t data_to_add_value : new data
\n\t type_ : "int" for int type / "byte" for bytes / "float" for float
Returns:
\n\t feature_dict : appended dict
'''
if(type_ == "int"):
feature_creator = _int64_feature
elif(type_ == "byte"):
feature_creator = _bytes_feature
elif(type_ == "float"):
feature_creator = _float_feature
else:
raise Exception("Incorrect arg: type_")
try:
feature_dict[data_to_add_key] = feature_creator(data_to_add_value)
except:
raise Exception("arg type_ and the type of arg data_to_add_value are not consistent")
return feature_dict
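# Illustrative usage sketch (added; the feature keys and values are assumptions, not from the original module):
#   feats = {}
#   feats = AddFeatureToDict(feats, 'label', 3, type_='int')
#   feats = AddFeatureToDict(feats, 'weights', [0.5, 0.25], type_='float')
#   example = FeatureDict2TfExample(feats)
#   serialized = example.SerializeToString()  # ready to be written to a TFRecord file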
def FeatureDict2TfExample(feature_dict):
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return example | [
"six.binary_type",
"tensorflow.train.Int64List",
"tensorflow.train.BytesList",
"tensorflow.train.Features",
"tensorflow.train.FloatList"
] | [((864, 904), 'six.binary_type', 'six.binary_type', (['value'], {'encoding': '"""utf-8"""'}), "(value, encoding='utf-8')\n", (879, 904), False, 'import six\n'), ((426, 457), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': 'value'}), '(value=value)\n', (444, 457), True, 'import tensorflow as tf\n'), ((654, 685), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'value'}), '(value=value)\n', (672, 685), True, 'import tensorflow as tf\n'), ((945, 978), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (963, 978), True, 'import tensorflow as tf\n'), ((2006, 2045), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature_dict'}), '(feature=feature_dict)\n', (2023, 2045), True, 'import tensorflow as tf\n')] |
import os
from base64 import b64encode
import yaml
flask_secret_key = b64encode(os.urandom(128)).decode('utf-8')
jwt_secret_key = b64encode(os.urandom(128)).decode('utf-8')
try:
with open('secret_config.yaml') as f:
parsed_config = yaml.load(f, yaml.BaseLoader)
except FileNotFoundError:
parsed_config = dict()
parsed_config.update({
'SECRET_KEY': flask_secret_key,
'JWT_SECRET_KEY': jwt_secret_key
})
with open('secret_config.yaml', 'w+') as f:
# yaml.dump(parsed_config, f, yaml.BaseDumper)
yaml.safe_dump(parsed_config, f)
print('Done') | [
"os.urandom",
"yaml.safe_dump",
"yaml.load"
] | [((531, 563), 'yaml.safe_dump', 'yaml.safe_dump', (['parsed_config', 'f'], {}), '(parsed_config, f)\n', (545, 563), False, 'import yaml\n'), ((247, 276), 'yaml.load', 'yaml.load', (['f', 'yaml.BaseLoader'], {}), '(f, yaml.BaseLoader)\n', (256, 276), False, 'import yaml\n'), ((82, 97), 'os.urandom', 'os.urandom', (['(128)'], {}), '(128)\n', (92, 97), False, 'import os\n'), ((142, 157), 'os.urandom', 'os.urandom', (['(128)'], {}), '(128)\n', (152, 157), False, 'import os\n')] |
"""Utilities for sending rabbitmq messages"""
import pika
from services import logger_service
import json
import traceback
from medaimodels import ModelOutput
import services
CLASSIFIER_QUEUE = 'classifier_results'
EVAL_QUEUE = 'eval_results'
LOG_QUEUE = 'log_results'
def send_message(queue: str, message):
try:
rabbit_url = services.settings_service.get_rabbitmq_url()
connection = pika.BlockingConnection(pika.URLParameters(rabbit_url))
channel = connection.channel()
channel.queue_declare(queue)
channel.basic_publish(exchange='',
routing_key=queue,
body=message)
print(f'sent message to {queue}: {message}')
connection.close()
except:
print(f'Failed sending message to {queue}, message: {message}', traceback.format_exc())
def send_notification(msg: str, notification_type: str):
"""Send notification to the message queue"""
message = json.dumps({"message": msg, "type": notification_type})
send_message('notifications', message)
def send_model_log(eval_id: str, line: str):
message = json.dumps({"evalId": eval_id, "line": line})
send_message('model_log', message)
def start_result_queue():
url = services.settings_service.get_rabbitmq_url()
connection = pika.BlockingConnection(pika.URLParameters(url))
channel = connection.channel()
channel.queue_declare(EVAL_QUEUE)
channel.queue_declare(CLASSIFIER_QUEUE)
def get_channel():
url = services.settings_service.get_rabbitmq_url()
connection = pika.BlockingConnection(pika.URLParameters(url))
channel = connection.channel()
return channel
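# Illustrative usage sketch (added; queue names and payloads are placeholders):
#   send_notification('Evaluation finished', 'info')
#   send_model_log('eval-123', 'epoch 1/10 complete')
#   send_message(EVAL_QUEUE, json.dumps({'evalId': 'eval-123', 'status': 'done'}))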
| [
"traceback.format_exc",
"pika.URLParameters",
"json.dumps",
"services.settings_service.get_rabbitmq_url"
] | [((992, 1047), 'json.dumps', 'json.dumps', (["{'message': msg, 'type': notification_type}"], {}), "({'message': msg, 'type': notification_type})\n", (1002, 1047), False, 'import json\n'), ((1152, 1197), 'json.dumps', 'json.dumps', (["{'evalId': eval_id, 'line': line}"], {}), "({'evalId': eval_id, 'line': line})\n", (1162, 1197), False, 'import json\n'), ((1275, 1319), 'services.settings_service.get_rabbitmq_url', 'services.settings_service.get_rabbitmq_url', ([], {}), '()\n', (1317, 1319), False, 'import services\n'), ((1535, 1579), 'services.settings_service.get_rabbitmq_url', 'services.settings_service.get_rabbitmq_url', ([], {}), '()\n', (1577, 1579), False, 'import services\n'), ((342, 386), 'services.settings_service.get_rabbitmq_url', 'services.settings_service.get_rabbitmq_url', ([], {}), '()\n', (384, 386), False, 'import services\n'), ((1361, 1384), 'pika.URLParameters', 'pika.URLParameters', (['url'], {}), '(url)\n', (1379, 1384), False, 'import pika\n'), ((1621, 1644), 'pika.URLParameters', 'pika.URLParameters', (['url'], {}), '(url)\n', (1639, 1644), False, 'import pika\n'), ((432, 462), 'pika.URLParameters', 'pika.URLParameters', (['rabbit_url'], {}), '(rabbit_url)\n', (450, 462), False, 'import pika\n'), ((846, 868), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (866, 868), False, 'import traceback\n')] |
import torch
from torch.optim.optimizer import Optimizer
import torch.distributed as dist
class AdaHessian(Optimizer):
"""
    Implements the AdaHessian algorithm from "ADAHESSIAN: An Adaptive Second Order Optimizer for Machine Learning"
Arguments:
params (iterable) -- iterable of parameters to optimize or dicts defining parameter groups
lr (float, optional) -- learning rate (default: 0.1)
betas ((float, float), optional) -- coefficients used for computing running averages of gradient and the squared hessian trace (default: (0.9, 0.999))
        eps (float, optional) -- term added to the denominator to improve numerical stability (default: 1e-4)
warmup (int, optional): number of warmup steps (default: 0)
init_lr (float, optional): initial learning rate for warmup (default: 0.0)
weight_decay (float, optional) -- weight decay (L2 penalty) (default: 0.0)
hessian_power (float, optional) -- exponent of the hessian trace (default: 1.0)
update_each (int, optional) -- compute the hessian trace approximation only after *this* number of steps (to save time) (default: 1)
num_threads (int, optional) -- number of threads for distributed training (default: 1)
"""
def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-4, weight_decay=0.0,
warmup=0, init_lr=0.0, hessian_power=1.0, update_each=1,
num_threads=1, average_conv_kernel=False):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
if not 0.0 <= warmup:
raise ValueError("Invalid warmup updates: {}".format(warmup))
if not 0.0 <= init_lr <= 1.0:
raise ValueError("Invalid initial learning rate: {}".format(init_lr))
if not 0.0 <= hessian_power <= 1.0:
raise ValueError(f"Invalid Hessian power value: {hessian_power}")
self.update_each = update_each
self.num_threads = num_threads
self.average_conv_kernel = average_conv_kernel
defaults = dict(lr=lr, betas=betas, eps=eps, warmup=warmup, init_lr=init_lr, base_lr=lr,
weight_decay=weight_decay, hessian_power=hessian_power)
super(AdaHessian, self).__init__(params, defaults)
for p in self.get_params():
p.hess = 0.0
self.state[p]["hessian step"] = 0
def get_params(self):
"""
Gets all parameters in all param_groups with gradients
"""
return (p for group in self.param_groups for p in group['params'] if p.requires_grad)
def zero_hessian(self):
"""
        Zeros out the accumulated hessian traces.
"""
for p in self.get_params():
if not isinstance(p.hess, float) and self.state[p]["hessian step"] % self.update_each == 0:
p.hess.zero_()
def set_hessian(self):
"""
Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter.
"""
params = []
for p in filter(lambda p: p.grad is not None, self.get_params()):
if self.state[p]["hessian step"] % self.update_each == 0: # compute the trace only each `update_each` step
params.append(p)
self.state[p]["hessian step"] += 1
if len(params) == 0:
return
grads = [p.grad for p in params]
# Rademacher distribution {-1.0, 1.0}
zs = [torch.randint_like(p, high=2) * 2.0 - 1.0 for p in params]
# sync zs for distributed setting
if self.num_threads > 1:
for z in zs:
dist.broadcast(z, src=0)
hzs = torch.autograd.grad(grads, params, grad_outputs=zs, only_inputs=True, retain_graph=True)
for hz, z, p in zip(hzs, zs, params):
hut_trace = (hz * z).contiguous() # approximate the expected values of z*(H@z)
if self.num_threads > 1:
dist.all_reduce(hut_trace)
hut_trace.div_(self.num_threads)
p.hess = hut_trace
@torch.no_grad()
def step(self, closure=None):
"""
Performs a single optimization step.
Arguments:
closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None)
"""
loss = None
if closure is not None:
loss = closure()
self.zero_hessian()
self.set_hessian()
for group in self.param_groups:
for p in group['params']:
if p.grad is None or p.hess is None:
continue
if self.average_conv_kernel and p.dim() == 4:
p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone()
state = self.state[p]
# State initialization
if len(state) == 1:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p.data) # Exponential moving average of gradient values
state['exp_hessian_diag_sq'] = torch.zeros_like(p.data) # Exponential moving average of Hessian diagonal square values
# Calculate current lr
if state['step'] < group['warmup']:
curr_lr = (group['base_lr'] - group['init_lr']) * state['step'] / group['warmup'] + group['init_lr']
else:
curr_lr = group['lr']
# Perform correct stepweight decay as in AdamW
p.mul_(1 - curr_lr * group['weight_decay'])
exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1)
exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2)
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
k = group['hessian_power']
denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps'])
# make update
step_size = curr_lr / bias_correction1
p.addcdiv_(exp_avg, denom, value=-step_size)
return loss
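# Illustrative usage sketch (added; model, criterion and loader are placeholders).
# AdaHessian differentiates the gradients again (torch.autograd.grad in set_hessian),
# so the backward pass must keep the graph: call loss.backward(create_graph=True).
#   model = MyModel()
#   optimizer = AdaHessian(model.parameters(), lr=0.1)
#   for inputs, targets in loader:
#       optimizer.zero_grad()
#       loss = criterion(model(inputs), targets)
#       loss.backward(create_graph=True)  # keep the graph for Hessian-vector products
#       optimizer.step()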
| [
"torch.abs",
"torch.randint_like",
"torch.distributed.all_reduce",
"torch.distributed.broadcast",
"torch.autograd.grad",
"torch.no_grad",
"torch.zeros_like"
] | [((4418, 4433), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4431, 4433), False, 'import torch\n'), ((4024, 4116), 'torch.autograd.grad', 'torch.autograd.grad', (['grads', 'params'], {'grad_outputs': 'zs', 'only_inputs': '(True)', 'retain_graph': '(True)'}), '(grads, params, grad_outputs=zs, only_inputs=True,\n retain_graph=True)\n', (4043, 4116), False, 'import torch\n'), ((3984, 4008), 'torch.distributed.broadcast', 'dist.broadcast', (['z'], {'src': '(0)'}), '(z, src=0)\n', (3998, 4008), True, 'import torch.distributed as dist\n'), ((4305, 4331), 'torch.distributed.all_reduce', 'dist.all_reduce', (['hut_trace'], {}), '(hut_trace)\n', (4320, 4331), True, 'import torch.distributed as dist\n'), ((3809, 3838), 'torch.randint_like', 'torch.randint_like', (['p'], {'high': '(2)'}), '(p, high=2)\n', (3827, 3838), False, 'import torch\n'), ((5332, 5356), 'torch.zeros_like', 'torch.zeros_like', (['p.data'], {}), '(p.data)\n', (5348, 5356), False, 'import torch\n'), ((5457, 5481), 'torch.zeros_like', 'torch.zeros_like', (['p.data'], {}), '(p.data)\n', (5473, 5481), False, 'import torch\n'), ((5065, 5082), 'torch.abs', 'torch.abs', (['p.hess'], {}), '(p.hess)\n', (5074, 5082), False, 'import torch\n')] |
import argparse
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from utils import StateFactory
def argmax(array):
rows = range(array.shape[0])
cols = array.argmax(axis=1)
return rows, cols
def compute_prob(dim, mu, rho, sigma, tau, state_factory):
state = state_factory()
exclusive = state.randn(3 * dim) * (sigma ** 2 - rho) ** 0.5
shared = mu + state.randn() * rho ** 0.5
theta = shared + exclusive
# making one observation per entry
x0 = np.zeros(3 * dim)
x0[:dim] = -1.0 / dim ** 0.5
x1 = np.zeros(3 * dim)
x1[:dim] = -1.0 / dim ** 0.5
x1[dim:] = +1.0 / dim ** 0.5
# check whether x1 is the optimal arm
x1_optimal = theta @ x1 > theta @ x0 and theta @ x1 > 0
# check whether x0 is the first choice
sample = state.randn(3 * dim)
x0_selected = sample @ x0 > 0 and sample @ x0 > sample @ x1
# the observation after the first round
y = theta @ x0 + state.randn() * tau
mean = (y * x0) / (1 + x0 @ x0)
# computing the probability that x0 is pulled at t=2
mean0 = mean @ x0
var0 = (x0 @ x0) / (1 + x0 @ x0)
ratio0 = mean0 / var0 ** 0.5
prob0 = stats.norm.cdf(ratio0)
# computing the probability that x1 is pulled at t=2
mean1 = mean @ x1
var1 = x1 @ (x1 - x0 * (x0 @ x0) / (1 + x0 @ x0))
ratio1 = mean1 / var1 ** 0.5
prob1 = stats.norm.cdf(ratio1)
# applying the union bound
prob = prob0 + prob1
return np.array([x1_optimal, x0_selected, 1.0 / prob])
def run_experiment(n_iter, dim, mu, rho, sigma, tau, seed):
state_factory = StateFactory(seed)
results = np.stack(
[compute_prob(dim, mu, rho, sigma, tau, state_factory) for _ in range(n_iter)]
)
if rho > 0.5:
print(results)
exit(0)
return results
def __main__():
types = ["mu", "rho", "dim"]
labels = {
"mu": "$\\mu$",
"rho": "$\\rho$",
"dim": "$d$",
}
parser = argparse.ArgumentParser(
description="Verify the Thompson sampling failures empirically."
)
parser.add_argument("--n-iter", type=int, help="number of iterations", default=50)
parser.add_argument(
"--n-value", type=int, help="number of different values to try", default=21
)
parser.add_argument(
"--dim", type=int, help="number of iterations", default=50 * 1000
)
parser.add_argument("--mu", type=float, help="prior mean", default=0.01)
parser.add_argument("--rho", type=float, help="prior correlation", default=0.0)
parser.add_argument("--sigma", type=float, help="prior sd", default=1.0)
parser.add_argument("--tau", type=float, help="noise sd", default=1.0)
parser.add_argument("--seed", type=int, help="initial random seed", default=1)
parser.add_argument(
"--change", type=str, help="varying parameter", choices=types, default="dim"
)
args = parser.parse_args()
xticks = None
xrots = "0"
if args.change == "mu":
mus = np.linspace(0.0, args.mu, args.n_value)
xticks = [f"{i:.2f}" for i in mus]
xrots = "45"
else:
mus = np.repeat(args.mu, args.n_value)
if args.change == "rho":
rhos = np.linspace(0.0, args.rho, args.n_value)
xticks = [f"{i:.2f}" for i in rhos]
xrots = "45"
else:
rhos = np.repeat(args.rho, args.n_value)
if args.change == "dim":
dims = 2 ** np.arange(1, args.n_value + 1)
args.dim = dims[-1]
xticks = [f"$2^{{{i}}}$" for i in range(args.n_value)]
else:
        dims = np.repeat(args.dim, args.n_value).astype(int)  # use builtin int; the np.int alias was removed in newer NumPy
results = []
for mu, rho, dim in zip(mus, rhos, dims):
print(f"Running experiment with mu = {mu:.3f} rho = {rho:.3f} dim = {dim}")
results.append(
run_experiment(
n_iter=args.n_iter,
dim=dim,
mu=mu,
rho=rho,
sigma=args.sigma,
tau=args.tau,
seed=args.seed,
)
)
results = np.array(results)
# plotting the data
y = results[:, :, 2].T
plt.yscale('log')
plt.boxplot(y, positions=range(args.n_value), showfliers=False)
plt.xticks(range(args.n_value), xticks, rotation=xrots)
plt.xlabel(labels[args.change])
plt.ylabel("Expected number of failures for LinTS")
plt.tight_layout()
plt.savefig(
f"plots/example-2-{args.change}-{args.dim}-{args.mu}-{args.rho}-{args.seed}.pdf"
)
print(f"Ratio of times x1 is optimal:")
print(f"\t{results[:, :, 0].mean(axis=1)}")
print(f"Ratio of times x0 is selected at t=0:")
print(f"\t{results[:, :, 1].mean(axis=1)}")
print("Experiment finished successfully.")
if __name__ == "__main__":
__main__()
| [
"matplotlib.pyplot.savefig",
"numpy.repeat",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"utils.StateFactory",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"matplotlib.pyplot.tight_layout",
"scipy.stats.norm.cdf",
"matplotlib.pyplot.yscale",
"numpy.arange"
] | [((515, 532), 'numpy.zeros', 'np.zeros', (['(3 * dim)'], {}), '(3 * dim)\n', (523, 532), True, 'import numpy as np\n'), ((576, 593), 'numpy.zeros', 'np.zeros', (['(3 * dim)'], {}), '(3 * dim)\n', (584, 593), True, 'import numpy as np\n'), ((1189, 1211), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['ratio0'], {}), '(ratio0)\n', (1203, 1211), False, 'from scipy import stats\n'), ((1391, 1413), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['ratio1'], {}), '(ratio1)\n', (1405, 1413), False, 'from scipy import stats\n'), ((1483, 1530), 'numpy.array', 'np.array', (['[x1_optimal, x0_selected, 1.0 / prob]'], {}), '([x1_optimal, x0_selected, 1.0 / prob])\n', (1491, 1530), True, 'import numpy as np\n'), ((1613, 1631), 'utils.StateFactory', 'StateFactory', (['seed'], {}), '(seed)\n', (1625, 1631), False, 'from utils import StateFactory\n'), ((1986, 2080), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Verify the Thompson sampling failures empirically."""'}), "(description=\n 'Verify the Thompson sampling failures empirically.')\n", (2009, 2080), False, 'import argparse\n'), ((4079, 4096), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (4087, 4096), True, 'import numpy as np\n'), ((4153, 4170), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (4163, 4170), True, 'import matplotlib.pyplot as plt\n'), ((4304, 4335), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['labels[args.change]'], {}), '(labels[args.change])\n', (4314, 4335), True, 'import matplotlib.pyplot as plt\n'), ((4340, 4391), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Expected number of failures for LinTS"""'], {}), "('Expected number of failures for LinTS')\n", (4350, 4391), True, 'import matplotlib.pyplot as plt\n'), ((4396, 4414), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4412, 4414), True, 'import matplotlib.pyplot as plt\n'), ((4420, 4523), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""plots/example-2-{args.change}-{args.dim}-{args.mu}-{args.rho}-{args.seed}.pdf"""'], {}), "(\n f'plots/example-2-{args.change}-{args.dim}-{args.mu}-{args.rho}-{args.seed}.pdf'\n )\n", (4431, 4523), True, 'import matplotlib.pyplot as plt\n'), ((3019, 3058), 'numpy.linspace', 'np.linspace', (['(0.0)', 'args.mu', 'args.n_value'], {}), '(0.0, args.mu, args.n_value)\n', (3030, 3058), True, 'import numpy as np\n'), ((3147, 3179), 'numpy.repeat', 'np.repeat', (['args.mu', 'args.n_value'], {}), '(args.mu, args.n_value)\n', (3156, 3179), True, 'import numpy as np\n'), ((3225, 3265), 'numpy.linspace', 'np.linspace', (['(0.0)', 'args.rho', 'args.n_value'], {}), '(0.0, args.rho, args.n_value)\n', (3236, 3265), True, 'import numpy as np\n'), ((3356, 3389), 'numpy.repeat', 'np.repeat', (['args.rho', 'args.n_value'], {}), '(args.rho, args.n_value)\n', (3365, 3389), True, 'import numpy as np\n'), ((3440, 3470), 'numpy.arange', 'np.arange', (['(1)', '(args.n_value + 1)'], {}), '(1, args.n_value + 1)\n', (3449, 3470), True, 'import numpy as np\n'), ((3587, 3620), 'numpy.repeat', 'np.repeat', (['args.dim', 'args.n_value'], {}), '(args.dim, args.n_value)\n', (3596, 3620), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os
import sys
from ricecooker.utils import downloader, html_writer
from ricecooker.chefs import SushiChef
from ricecooker.classes import nodes, files, questions
from ricecooker.config import LOGGER # Use LOGGER to print messages
from ricecooker.exceptions import raise_for_invalid_channel
from le_utils.constants import exercises, content_kinds, file_formats, format_presets, languages, licenses
import cssutils
import requests
import youtube_dl
from bs4 import BeautifulSoup
from io import BytesIO
from PIL import Image
import logging
cssutils.log.setLevel(logging.CRITICAL)
# Run constants
################################################################################
CHANNEL_NAME = "Exploratorium" # Name of channel
CHANNEL_SOURCE_ID = "sushi-chef-exploratorium" # Channel's unique id
CHANNEL_DOMAIN = "www.exploratorium.edu" # Who is providing the content
CHANNEL_LANGUAGE = "en" # Language of channel
CHANNEL_DESCRIPTION = "Science tools and experiences helping " + \
"students become active explorers: hundreds of " + \
"explore-for-yourself exhibits, activities, " + \
"videos, and much more. Appropriate for all " + \
"ages of students, as supplementary, hands-on " + \
"demonstration of scientific principles."
CHANNEL_THUMBNAIL = "http://wtxec.org/exploratorium_tm.jpg" # Local path or url to image file (optional)
# Additional constants
################################################################################
COPYRIGHT_HOLDER = "Exploratorium Teacher Institute"
LICENSE = licenses.CC_BY_NC_SA
BASE_URL = "https://www.exploratorium.edu/{}"
SNACK_URL = "https://www.exploratorium.edu/snacks/snacks-by-subject"
VIDEO_URL = "https://www.exploratorium.edu/video/subjects"
BRIGHTCOVE_URL = "http://players.brightcove.net/{account}/{player}_default/index.html?videoId={videoid}"
IMAGE_EXTENSIONS = ['jpeg', 'jpg', 'gif', 'png', 'svg']
DOWNLOAD_ATTEMPTS = 25
# Directory to download snacks (html zips) into
SNACK_DIRECTORY = "{}{}{}".format(os.path.dirname(os.path.realpath(__file__)), os.path.sep, "snacks")
if not os.path.exists(SNACK_DIRECTORY):
os.makedirs(SNACK_DIRECTORY)
# Directory to download videos into
VIDEO_DIRECTORY = "{}{}{}".format(os.path.dirname(os.path.realpath(__file__)), os.path.sep, "videos")
if not os.path.exists(VIDEO_DIRECTORY):
os.makedirs(VIDEO_DIRECTORY)
# Directory to download shared assets (e.g. pngs, gifs, svgs) from stylesheets into
SHARED_ASSET_DIRECTORY = os.path.sep.join([SNACK_DIRECTORY, "shared-assets"])
if not os.path.exists(SHARED_ASSET_DIRECTORY):
os.makedirs(SHARED_ASSET_DIRECTORY)
# The chef subclass
################################################################################
class MyChef(SushiChef):
"""
This class uploads the Exploratorium channel to Kolibri Studio.
Your command line script should call the `main` method as the entry point,
which performs the following steps:
- Parse command line arguments and options (run `./sushichef.py -h` for details)
- Call the `SushiChef.run` method which in turn calls `pre_run` (optional)
and then the ricecooker function `uploadchannel` which in turn calls this
class' `get_channel` method to get channel info, then `construct_channel`
to build the contentnode tree.
For more info, see https://github.com/learningequality/ricecooker/tree/master/docs
"""
channel_info = { # Channel Metadata
'CHANNEL_SOURCE_DOMAIN': CHANNEL_DOMAIN, # Who is providing the content
'CHANNEL_SOURCE_ID': CHANNEL_SOURCE_ID, # Channel's unique id
'CHANNEL_TITLE': CHANNEL_NAME, # Name of channel
'CHANNEL_LANGUAGE': CHANNEL_LANGUAGE, # Language of channel
'CHANNEL_THUMBNAIL': CHANNEL_THUMBNAIL, # Local path or url to image file (optional)
'CHANNEL_DESCRIPTION': CHANNEL_DESCRIPTION, # Description of the channel (optional)
}
# Your chef subclass can override/extend the following method:
# get_channel: to create ChannelNode manually instead of using channel_info
# pre_run: to perform preliminary tasks, e.g., crawling and scraping website
# __init__: if need to customize functionality or add command line arguments
def construct_channel(self, *args, **kwargs):
"""
Creates ChannelNode and build topic tree
Args:
- args: arguments passed in during upload_channel (currently None)
        - kwargs: extra arguments and options not handled by `uploadchannel`.
For example, add the command line option lang="fr" and the string
"fr" will be passed along to `construct_channel` as kwargs['lang'].
Returns: ChannelNode
Channel structure:
Activities
Subject
Subdirectory (if any)
Activity.zip
Videos
Subject
Collection
Video.mp4
"""
channel = self.get_channel(*args, **kwargs) # Create ChannelNode from data in self.channel_info
channel.add_child(scrape_snack_menu(SNACK_URL))
channel.add_child(scrape_video_menu(VIDEO_URL))
raise_for_invalid_channel(channel) # Check for errors in channel construction
return channel
def read(url):
""" Read contents from url
Args:
url (str): url to read
Returns contents from url
"""
return downloader.read(format_url(url))
def format_url(url):
""" Format relative urls to be absolute urls
Args:
url (str): url to format
Returns absolute url (str)
"""
if url.startswith('http'):
return url
return BASE_URL.format(url.lstrip('/'))
def get_next_page_url(contents):
""" Get link to next page
Args:
contents (BeautifulSoup): page contents to search for next page
Returns link to next page (str)
"""
next_link = contents.find('li', {'class': 'pager-next'})
if next_link:
return next_link.find('a')['href']
def get_thumbnail_url(url):
""" Get thumbnail, converting gifs to pngs if necessary
Args:
url (str): thumbnail url
        Returns url or local path to the thumbnail image (str)
"""
url = url.split("?")[0].rstrip("%20")
# Convert gifs to pngs
if url.endswith('.gif'):
imgfile = BytesIO(read(url))
url = os.path.sep.join([SNACK_DIRECTORY, url.split("/")[-1].replace('gif', 'png')])
with Image.open(imgfile) as img:
img.save(url,'png', optimize=True, quality=70)
return url or None
def get_brightcove_mapping(contents, get_playlist=False):
""" Scrape contents for brightcove videos
Args:
contents (BeautifulSoup): page contents
get_playlist (bool): determines whether or not to scrape for playlists too
Returns mapping of brightcove urls and data
"""
brightcove_mapping = {}
account = "" # Store account number as it isn't stored on playlist video elements
# Get main videos
for video in contents.find_all('video', {'class': 'bc5player'}):
account = video['data-account']
attribution = contents.find('div', {'class': 'attribution'})
brightcove_mapping.update({video['data-video-id']: {
"original_el": video,
"author": attribution and attribution.text,
"url": BRIGHTCOVE_URL.format(account=video['data-account'],
player=video['data-player'],
videoid=video['data-video-id'])
}})
# Add videos from brightcove playlists
playlist = contents.find('div', {'id': 'media-collection-banner-playlist'})
if get_playlist and playlist:
for video in playlist.find_all('div', {'class': 'playlist-item'}):
brightcove_mapping.update({video['data-id']: {
"title": video['data-title'],
"append_to": playlist,
"url": BRIGHTCOVE_URL.format(account=account,
player=video['data-pid'],
videoid=video['data-id']),
}})
return brightcove_mapping
# Video scraping functions
################################################################################
def scrape_video_menu(url):
""" Scrape videos from url
Args:
url (str): url to scrape from (e.g. https://www.exploratorium.edu/video/subjects)
Returns TopicNode containing all videos
"""
LOGGER.info("SCRAPING VIDEOS...")
video_topic = nodes.TopicNode(title="Videos", source_id="main-topic-videos")
contents = BeautifulSoup(read(url), 'html5lib')
for subject in contents.find_all('div', {'class': 'subject'}):
title = subject.find('div', {'class': 'name'}).text.strip().replace("’", "'")
LOGGER.info(" {}".format(title))
topic = nodes.TopicNode(
title=title,
source_id="videos-{}".format(title),
thumbnail=get_thumbnail_url(subject.find('img')['src']),
)
video_topic.add_child(topic)
scrape_video_subject(subject.find('a')['href'], topic)
return video_topic
def scrape_video_subject(url, topic):
""" Scrape collections under video subject and add to the topic node
Args:
url (str): url to subject page (e.g. https://www.exploratorium.edu/search/video?f[0]=field_activity_subject%3A565)
topic (TopicNode): topic to add collection nodes to
"""
contents = BeautifulSoup(read(url), 'html5lib')
sidebar = contents.find("div", {"id": "filter_content"}).find("div", {"class": "content"})
for collection in sidebar.find_all("li"):
title = collection.find('span').text.replace('filter', '').replace("Apply", "").strip().replace("’", "'")
LOGGER.info(" {}".format(title))
collection_topic = nodes.TopicNode(title=title, source_id="videos-collection-{}".format(title))
topic.add_child(collection_topic)
scrape_video_collection(collection.find('a')['href'], collection_topic)
def scrape_video_collection(url, topic):
""" Scrape videos under video collection and add to the topic node
Args:
url (str): url to video page (e.g. https://www.exploratorium.edu/video/inflatable-jimmy-kuehnle)
topic (TopicNode): topic to add video nodes to
"""
try:
collection_contents = BeautifulSoup(read(url), 'html5lib')
for result in collection_contents.find_all('div', {'class': 'search-result'}):
header = result.find('div', {'class': 'views-field-field-html-title'})
LOGGER.info(" {}".format(header.text.strip()))
# Get video from given url
description = result.find('div', {'class': 'search-description'})
video_contents = BeautifulSoup(read(header.find('a')['href']), 'html.parser')
for k, v in get_brightcove_mapping(video_contents).items():
video_node = nodes.VideoNode(
source_id = k,
title = header.text.strip().replace("’", "'"),
description = description.text.strip() if description else "",
license = LICENSE,
copyright_holder = COPYRIGHT_HOLDER,
author = v.get('author') or "",
files = [files.WebVideoFile(v['url'], high_resolution=False)],
thumbnail = get_thumbnail_url(result.find('img')['src']),
)
# If video doesn't already exist here, add to topic
if not next((c for c in topic.children if c.source_id == video_node.source_id), None):
topic.add_child(video_node)
# Scrape next page (if any)
next_page_url = get_next_page_url(collection_contents)
if next_page_url:
scrape_video_collection(next_page_url, topic)
except requests.exceptions.HTTPError:
LOGGER.error("Could not read collection at {}".format(url))
# Activity scraping functions
################################################################################
def scrape_snack_menu(url):
""" Scrape snacks (activities) from url
Args:
url (str): url to scrape from (e.g. https://www.exploratorium.edu/snacks/snacks-by-subject)
Returns TopicNode containing all snacks
"""
LOGGER.info("SCRAPING ACTIVITIES...")
snack_topic = nodes.TopicNode(title="Activities", source_id="main-topic-activities")
contents = BeautifulSoup(read(url), 'html5lib')
# Get #main-content-container .field-items
contents = contents.find('div', {'id': 'main-content-container'})\
.find('div', {'class': 'field-items'})
for column in contents.find_all('ul', {'class': 'menu'}):
# Skip nested .menu list items (captured in subdirectory)
if column.parent.name == 'li':
continue
# Go through top-level li elements
for li in column.find_all('li', recursive=False):
link = li.find('a')
LOGGER.info(" {}".format(link['title']))
topic = nodes.TopicNode(title=link['title'].replace("’", "'"), source_id=link['href'])
snack_topic.add_child(topic)
# Scrape subcategories (if any)
if li.find('ul'):
for sublink in li.find('ul').find_all('a'):
LOGGER.info(" > {}".format(sublink['title']))
subtopic = nodes.TopicNode(title=sublink['title'].replace("’", "'"), source_id=sublink['href'])
topic.add_child(subtopic)
scrape_snack_subject(sublink['href'], subtopic)
else:
scrape_snack_subject(link['href'], topic)
return snack_topic
def scrape_snack_subject(slug, topic):
""" Scrape snack subject page
Args:
slug (str): url slug to scrape from (e.g. /subject/arts)
topic (TopicNode): topic to add html nodes to
"""
contents = BeautifulSoup(read(slug), 'html5lib')
for activity in contents.find_all('div', {'class': 'activity'}):
LOGGER.info(" {}".format(activity.find('h5').text.strip()))
# Scrape snack pages into zips
write_to_path, tags = scrape_snack_page(activity.find('a')['href'])
if not write_to_path:
continue
# Create html node
description = activity.find('div', {'class': 'pod-description'})
topic.add_child(nodes.HTML5AppNode(
source_id = activity.find('a')['href'],
title = activity.find('h5').text.strip().replace("’", "'"),
description = description.text.strip() if description else "",
license = LICENSE,
copyright_holder = COPYRIGHT_HOLDER,
files = [files.HTMLZipFile(path=write_to_path)],
thumbnail = get_thumbnail_url(activity.find('img')['src']),
tags=tags,
))
# Scrape next page (if any)
next_page_url = get_next_page_url(contents)
if next_page_url:
scrape_snack_subject(next_page_url, topic)
def scrape_snack_page(slug, attempts=5):
""" Writes activity to a zipfile
Args:
slug (str): url slug (e.g. /snacks/drawing-board)
        attempts (int): number of times to attempt a download
Returns
write_to_path (str): path to generated zip
tags ([str]): list of tags scraped from activity page
"""
tags = []
write_to_path = os.path.sep.join([SNACK_DIRECTORY, "{}.zip".format(slug.split('/')[-1])])
try:
contents = BeautifulSoup(read(slug), 'html5lib')
main_contents = contents.find('div', {'class': 'activity'})
# Gather keywords from page
tags.extend(scrape_keywords(main_contents, 'field-name-field-activity-subject'))
tags.extend(scrape_keywords(main_contents, 'field-name-field-activity-tags'))
# Don't rezip activities that have already been zipped
if os.path.isfile(write_to_path):
return write_to_path, tags
with html_writer.HTMLWriter(write_to_path) as zipper:
write_contents = BeautifulSoup("", "html5lib")
# Scrape stylesheets
for stylesheet in contents.find_all('link', {'rel': 'stylesheet'}):
# Don't scrape external style sheets (e.g. fontawesome, google fonts)
if "exploratorium.edu" not in stylesheet['href']:
continue
style_contents = scrape_style(stylesheet['href'], zipper)
filename = stylesheet['href'].split('/')[-1]
stylesheet['href'] = zipper.write_contents(filename, style_contents, directory="css")
write_contents.head.append(stylesheet)
# Remove scripts and any unneeded sections
cluster = main_contents.find('div', {'id': 'curated-cluster'})
cluster and cluster.decompose()
service_links = main_contents.find('div', {'class': 'activity-service-links'})
service_links and service_links.decompose()
for script in main_contents.find_all("script"):
script.decompose()
# Get rid of hardcoded height/width on slideshow element
slideshow = main_contents.find('div', {'class': 'field-slideshow'})
if slideshow:
del slideshow['style']
# Add images
for img in main_contents.find_all('img'):
img['src'] = zipper.write_url(format_url(img['src']), img['src'].split('/')[-1], directory="images")
# Add videos embedded from youtube
for video in main_contents.find_all('div', {'class': 'yt-player'}):
yt_video_path = download_web_video(video['data-ytid'], "{}.mp4".format(video['data-ytid']))
video_tag = generate_video_tag(yt_video_path, zipper)
video_tag['style'] = video.find('div', {'class': 'placeholder'}).get('style')
video.replaceWith(video_tag)
# Add videos embedded from brightcove and remove playlist element (if any)
for k, v in get_brightcove_mapping(main_contents, get_playlist=True).items():
video_path = download_web_video(v['url'], "{}.mp4".format(k))
if v.get('original_el'):
v['original_el'].replaceWith(generate_video_tag(video_path, zipper))
elif v.get('append_to'):
if v.get('title'):
p_tag = contents.new_tag("p")
p_tag.string = v['title']
p_tag['style'] = "margin-top: 40px; margin-bottom: 10px"
v['append_to'].parent.append(p_tag)
v['append_to'].parent.append(generate_video_tag(video_path, zipper))
playlist = main_contents.find('div', {'id': 'media-collection-banner-playlist'})
if playlist:
playlist.decompose()
# Handle links (need to start with parent as beautifulsoup returns parent as None on links)
for paragraph in main_contents.find_all('p') + main_contents.find_all('li'):
for link in paragraph.find_all('a'):
# Skip any previously parsed links
if zipper.contains(link['href']):
continue
# Just bold activities and remove link
elif "exploratorium.edu/snacks/" in link['href']:
bold_tag = contents.new_tag("b")
bold_tag.string = link.text
link.replaceWith(bold_tag)
# If it's an image, replace the tag with just the image
elif link.find('img'):
link.replaceWith(link.find('img'))
# Get downloadable files and attach them to new pages
elif "/sites/default/files/" in link['href']:
link['href'] = generate_download_page(link['href'], zipper)
# Get any referenced videos
elif "exploratorium.edu" in link['href']:
linked_page = BeautifulSoup(read(link['href']), 'html5lib')
link.replaceWith(link.text.replace(link['href'], ''))
for k, v in get_brightcove_mapping(linked_page).items():
video_path = download_web_video(v['url'], "{}.mp4".format(k))
paragraph.append(generate_video_tag(video_path, zipper))
# Scrape any images
elif next((e for e in IMAGE_EXTENSIONS if link['href'].lower().endswith(e)), None):
img_tag = contents.new_tag('img')
img_tag['src'] = zipper.write_url(link['href'], link['href'].split('/')[-1], directory="images")
img_tag['style'] = "max-width: 100%;"
paragraph.append(img_tag)
link.replaceWith(link.text)
# Remove hyperlink from external links
else:
if link['href'] not in link.text and link.text not in link['href']:
link.string += " ({}) ".format(link['href'])
link.replaceWith(link.text)
# Write contents and custom tags
write_contents.body.append(main_contents)
write_contents.head.append(generate_custom_style_tag()) # Add custom style tag
write_contents.body.append(generate_custom_script_tag()) # Add custom script to handle slideshow
# Write main index.html file
zipper.write_index_contents(write_contents.prettify().encode('utf-8-sig'))
except Exception as e:
# Reattempt if there are attempts left
if attempts > 0:
return scrape_snack_page(slug, attempts=attempts-1)
else:
LOGGER.error("Could not scrape {} ({})".format(slug, str(e)))
return write_to_path, tags
def generate_download_page(url, zipper):
""" Create a page for files that are meant to be downloaded (e.g. worksheets)
Args:
url (str): url to file that is meant to be downloaded
zipper (html_writer): where to write download page to
Returns path to page in zipfile (str)
"""
# Get template soup
soup = BeautifulSoup("", "html.parser")
with open('download.html', 'rb') as templatecode:
newpage = BeautifulSoup(templatecode.read(), 'html5lib')
# Determine if link is one of the recognized file types
download_url = url.split("?")[0]
filename = download_url.split("/")[-1]
if download_url.endswith('pdf'):
render_tag = soup.new_tag('embed')
elif next((e for e in IMAGE_EXTENSIONS if download_url.lower().endswith(e)), None):
render_tag = soup.new_tag('img')
else:
LOGGER.error("Unknown file type found at {}".format(download_url))
return ""
# Add tag to new page and write page to zip
render_tag['src'] = zipper.write_url(format_url(download_url), filename)
newpage.body.append(render_tag)
return zipper.write_contents(filename.split('.')[0] + ".html", newpage.prettify())
def generate_video_tag(filepath, zipper):
""" Downloads video into zip and creates a corresponding <video> tag
Args:
filepath (str): path to video to zip
zipper (html_writer): where to write video to
Returns <video> tag
"""
soup = BeautifulSoup("", "html.parser")
video_tag = soup.new_tag("video")
source_tag = soup.new_tag("source")
source_tag['src'] = zipper.write_file(filepath, directory="videos")
source_tag['type'] = "video/mp4"
video_tag['controls'] = 'true'
video_tag['style'] = "width: 100%;"
video_tag.append(source_tag)
return video_tag
def generate_custom_style_tag():
""" Creates a custom style tag with extra css rules to add to zips
Returns <style> tag
"""
soup = BeautifulSoup("", "html.parser")
style_tag = soup.new_tag('style')
style_tag.string = "body { padding: 50px; }"
style_tag.string += ".activity {max-width: 900; margin: auto;}"
style_tag.string += ".underline { text-decoration: underline; }"
style_tag.string += "b, strong, h1, h3 {font-weight: 700 !important;}"
style_tag.string += "body, h1, h2, h3, h4, h5, h6, p, table, tr, td, th, ul, li, ol, dd, dl"
style_tag.string += "{ font-family: \"Trebuchet MS\", Helvetica, sans-serif !important; }"
style_tag.string += ".bcVideoWrapper:after {padding-top: 0 !important; }"
style_tag.string += "#media-collection-banner-content-container {background-color: transparent !important}"
style_tag.string += "#media-collection-banner-content-container #media-collection-video-container"
style_tag.string += "{ float: none; width: 100%; }"
return style_tag
def generate_custom_script_tag():
""" Creates a custom script tag to handle slideshow elements
Returns <script> tag
"""
soup = BeautifulSoup("", "html.parser")
script_tag = soup.new_tag('script')
script_tag["type"] = "text/javascript"
script_tag.string = "var image = document.getElementsByClassName('field-slideshow-image-1')[0];"
script_tag.string += "var tn = document.getElementsByClassName('field-slideshow-thumbnail');"
script_tag.string += "function setImage(tn) {image.setAttribute('src', tn.getAttribute('src'));}"
script_tag.string += "if(tn.length){setInterval(function() {setImage(tn[Math.floor(Math.random()*tn.length)]);}, 3000);"
script_tag.string += "for (var i = 0; i < tn.length; i++)"
script_tag.string += "tn[i].addEventListener('click', function(ev) {setImage(ev.target);}, false);}"
return script_tag
def download_web_video(url, filename):
""" Downloads a web video to the video directory
Args:
url (str): url to video to download
filename (str): name to save video under
Returns local path to video (str)
"""
# Generate write to path and download if it doesn't exist yet
write_to_path = os.path.sep.join([VIDEO_DIRECTORY, filename])
if not os.path.isfile(write_to_path):
download(url, write_to_path)
return write_to_path
def download(url, write_to_path, attempts=DOWNLOAD_ATTEMPTS):
""" Download the web video
Args:
url (str): url to video to download
write_to_path (str): where to write video to
attempts (int): how many times to reattempt a download
"""
try:
video_format = "bestvideo[height<=480][ext=mp4]+bestaudio[ext=m4a]/best[height<=480][ext=mp4]"
with youtube_dl.YoutubeDL({"format": video_format, "outtmpl": write_to_path}) as ydl:
ydl.download([url])
except youtube_dl.utils.DownloadError as e:
# If there are more attempts, try again. Otherwise, return error
if attempts > 0:
download(url, write_to_path, attempts=attempts-1)
else:
LOGGER.error("Could not download video {} ({})".format(url, str(e)))
raise e
def scrape_keywords(contents, el):
""" Scrape page contents for keywords
Args:
contents (BeautifulSoup): contents to scrape
el (str): element class to look for
Returns list of tags ([str])
"""
soup = BeautifulSoup("<div></div>", "html.parser")
tags = []
keyword_section = contents.find('div', {'class': el})
if keyword_section:
for related in keyword_section.find_all('a'):
i_tag = soup.new_tag('span')
i_tag.string = related.text
i_tag['class'] = "underline"
tags.append(related.text[:30])
related.replaceWith(i_tag) # Remove links to other pages
return tags
def scrape_style(url, zipper):
""" Scrape any instances of url(...)
Args:
url (str): url to css file
zipper (html_writer): zip to write to
Returns str of css style rules
"""
sheet = cssutils.parseUrl(url)
rules = sheet.cssText.decode('utf-8')
# Parse urls in css
for url in cssutils.getUrls(sheet):
try:
# Download any urls in css to the shared asset directory (if not already there)
filename = url.split('?')[0].split('/')[-1]
filepath = os.path.sep.join([SHARED_ASSET_DIRECTORY, filename])
if not os.path.isfile(filepath):
with open(filepath, 'wb') as fobj:
fobj.write(read(url))
# Replace text with new url
new_url = zipper.write_file(filepath, filename, directory="assets")
rules = rules.replace(url, "../" + new_url)
except requests.exceptions.HTTPError:
LOGGER.warning("Could not download css url {}".format(url))
return rules
# CLI
################################################################################
if __name__ == '__main__':
# This code runs when sushichef.py is called from the command line
chef = MyChef()
chef.main()
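# Typical invocation (ricecooker convention; run `./sushichef.py -h` for the full option
# list; the token below is a placeholder, not a value from this repo):
#   ./sushichef.py --token=<your-studio-token>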
| [
"os.path.exists",
"ricecooker.classes.files.HTMLZipFile",
"PIL.Image.open",
"ricecooker.exceptions.raise_for_invalid_channel",
"ricecooker.classes.files.WebVideoFile",
"os.makedirs",
"ricecooker.config.LOGGER.info",
"ricecooker.classes.nodes.TopicNode",
"cssutils.parseUrl",
"cssutils.log.setLevel",
"bs4.BeautifulSoup",
"cssutils.getUrls",
"os.path.sep.join",
"os.path.realpath",
"os.path.isfile",
"youtube_dl.YoutubeDL",
"ricecooker.utils.html_writer.HTMLWriter"
] | [((580, 619), 'cssutils.log.setLevel', 'cssutils.log.setLevel', (['logging.CRITICAL'], {}), '(logging.CRITICAL)\n', (601, 619), False, 'import cssutils\n'), ((2598, 2650), 'os.path.sep.join', 'os.path.sep.join', (["[SNACK_DIRECTORY, 'shared-assets']"], {}), "([SNACK_DIRECTORY, 'shared-assets'])\n", (2614, 2650), False, 'import os\n'), ((2210, 2241), 'os.path.exists', 'os.path.exists', (['SNACK_DIRECTORY'], {}), '(SNACK_DIRECTORY)\n', (2224, 2241), False, 'import os\n'), ((2247, 2275), 'os.makedirs', 'os.makedirs', (['SNACK_DIRECTORY'], {}), '(SNACK_DIRECTORY)\n', (2258, 2275), False, 'import os\n'), ((2422, 2453), 'os.path.exists', 'os.path.exists', (['VIDEO_DIRECTORY'], {}), '(VIDEO_DIRECTORY)\n', (2436, 2453), False, 'import os\n'), ((2459, 2487), 'os.makedirs', 'os.makedirs', (['VIDEO_DIRECTORY'], {}), '(VIDEO_DIRECTORY)\n', (2470, 2487), False, 'import os\n'), ((2658, 2696), 'os.path.exists', 'os.path.exists', (['SHARED_ASSET_DIRECTORY'], {}), '(SHARED_ASSET_DIRECTORY)\n', (2672, 2696), False, 'import os\n'), ((2702, 2737), 'os.makedirs', 'os.makedirs', (['SHARED_ASSET_DIRECTORY'], {}), '(SHARED_ASSET_DIRECTORY)\n', (2713, 2737), False, 'import os\n'), ((8782, 8815), 'ricecooker.config.LOGGER.info', 'LOGGER.info', (['"""SCRAPING VIDEOS..."""'], {}), "('SCRAPING VIDEOS...')\n", (8793, 8815), False, 'from ricecooker.config import LOGGER\n'), ((8834, 8896), 'ricecooker.classes.nodes.TopicNode', 'nodes.TopicNode', ([], {'title': '"""Videos"""', 'source_id': '"""main-topic-videos"""'}), "(title='Videos', source_id='main-topic-videos')\n", (8849, 8896), False, 'from ricecooker.classes import nodes, files, questions\n'), ((12703, 12740), 'ricecooker.config.LOGGER.info', 'LOGGER.info', (['"""SCRAPING ACTIVITIES..."""'], {}), "('SCRAPING ACTIVITIES...')\n", (12714, 12740), False, 'from ricecooker.config import LOGGER\n'), ((12759, 12829), 'ricecooker.classes.nodes.TopicNode', 'nodes.TopicNode', ([], {'title': '"""Activities"""', 'source_id': '"""main-topic-activities"""'}), "(title='Activities', source_id='main-topic-activities')\n", (12774, 12829), False, 'from ricecooker.classes import nodes, files, questions\n'), ((22825, 22857), 'bs4.BeautifulSoup', 'BeautifulSoup', (['""""""', '"""html.parser"""'], {}), "('', 'html.parser')\n", (22838, 22857), False, 'from bs4 import BeautifulSoup\n'), ((23964, 23996), 'bs4.BeautifulSoup', 'BeautifulSoup', (['""""""', '"""html.parser"""'], {}), "('', 'html.parser')\n", (23977, 23996), False, 'from bs4 import BeautifulSoup\n'), ((24466, 24498), 'bs4.BeautifulSoup', 'BeautifulSoup', (['""""""', '"""html.parser"""'], {}), "('', 'html.parser')\n", (24479, 24498), False, 'from bs4 import BeautifulSoup\n'), ((25509, 25541), 'bs4.BeautifulSoup', 'BeautifulSoup', (['""""""', '"""html.parser"""'], {}), "('', 'html.parser')\n", (25522, 25541), False, 'from bs4 import BeautifulSoup\n'), ((26586, 26631), 'os.path.sep.join', 'os.path.sep.join', (['[VIDEO_DIRECTORY, filename]'], {}), '([VIDEO_DIRECTORY, filename])\n', (26602, 26631), False, 'import os\n'), ((27840, 27883), 'bs4.BeautifulSoup', 'BeautifulSoup', (['"""<div></div>"""', '"""html.parser"""'], {}), "('<div></div>', 'html.parser')\n", (27853, 27883), False, 'from bs4 import BeautifulSoup\n'), ((28520, 28542), 'cssutils.parseUrl', 'cssutils.parseUrl', (['url'], {}), '(url)\n', (28537, 28542), False, 'import cssutils\n'), ((28625, 28648), 'cssutils.getUrls', 'cssutils.getUrls', (['sheet'], {}), '(sheet)\n', (28641, 28648), False, 'import cssutils\n'), ((2151, 2177), 'os.path.realpath', 'os.path.realpath', 
(['__file__'], {}), '(__file__)\n', (2167, 2177), False, 'import os\n'), ((2363, 2389), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2379, 2389), False, 'import os\n'), ((5397, 5431), 'ricecooker.exceptions.raise_for_invalid_channel', 'raise_for_invalid_channel', (['channel'], {}), '(channel)\n', (5422, 5431), False, 'from ricecooker.exceptions import raise_for_invalid_channel\n'), ((16342, 16371), 'os.path.isfile', 'os.path.isfile', (['write_to_path'], {}), '(write_to_path)\n', (16356, 16371), False, 'import os\n'), ((26643, 26672), 'os.path.isfile', 'os.path.isfile', (['write_to_path'], {}), '(write_to_path)\n', (26657, 26672), False, 'import os\n'), ((6697, 6716), 'PIL.Image.open', 'Image.open', (['imgfile'], {}), '(imgfile)\n', (6707, 6716), False, 'from PIL import Image\n'), ((16426, 16463), 'ricecooker.utils.html_writer.HTMLWriter', 'html_writer.HTMLWriter', (['write_to_path'], {}), '(write_to_path)\n', (16448, 16463), False, 'from ricecooker.utils import downloader, html_writer\n'), ((16504, 16533), 'bs4.BeautifulSoup', 'BeautifulSoup', (['""""""', '"""html5lib"""'], {}), "('', 'html5lib')\n", (16517, 16533), False, 'from bs4 import BeautifulSoup\n'), ((27150, 27222), 'youtube_dl.YoutubeDL', 'youtube_dl.YoutubeDL', (["{'format': video_format, 'outtmpl': write_to_path}"], {}), "({'format': video_format, 'outtmpl': write_to_path})\n", (27170, 27222), False, 'import youtube_dl\n'), ((28834, 28886), 'os.path.sep.join', 'os.path.sep.join', (['[SHARED_ASSET_DIRECTORY, filename]'], {}), '([SHARED_ASSET_DIRECTORY, filename])\n', (28850, 28886), False, 'import os\n'), ((28906, 28930), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (28920, 28930), False, 'import os\n'), ((15146, 15183), 'ricecooker.classes.files.HTMLZipFile', 'files.HTMLZipFile', ([], {'path': 'write_to_path'}), '(path=write_to_path)\n', (15163, 15183), False, 'from ricecooker.classes import nodes, files, questions\n'), ((11672, 11723), 'ricecooker.classes.files.WebVideoFile', 'files.WebVideoFile', (["v['url']"], {'high_resolution': '(False)'}), "(v['url'], high_resolution=False)\n", (11690, 11723), False, 'from ricecooker.classes import nodes, files, questions\n')] |
# coding=utf-8
# project: GitHub Tools Shell
# file: shell.py
# author: MacWinLin Studio CGK Team
# email: <EMAIL>
# version: LTS(Long Term Support) 2.0
# Publish only on GitHub and on MacWinLin Studio's GitLab.
# Copyright 2022 MacWinLin Studio. All rights reserved.
# To make an unofficial GUI client, please see the core file.
print('GitHub Tools Shell 2.0')
from core.language_core import UpdateLanguage,cdatabase,rdatabase
from core.githut_core import run
from requests import get
from platform import platform
from json import loads
from os import mkdir,rename,remove
from shutil import rmtree
import sqlite3,logging
language = UpdateLanguage
language.reload(self=language)
class logClass:
    # Used as a plain namespace (see `log = logClass` below), so the helpers take the
    # message as their first argument instead of `self`. Configure the shared log file
    # once at class-definition time rather than in an `__init__` that is never called.
    logging.basicConfig(level=logging.DEBUG, filemode='a+', filename='githut-log.log',
                        format="%(asctime)s - %(name)s - %(levelname)-9s - %(filename)-8s : %(lineno)s line - %(message)s",
                        datefmt="%Y-%m-%d %H:%M:%S")
def debug(e):
logging.debug(str(e))
def info(e):
logging.info(str(e))
def warning(e):
logging.warning(str(e))
def error(e):
logging.error(str(e))
def critical(e):
logging.critical(str(e))
def exception(e):
logging.exception(e)
log = logClass
# Connect to database and config basic information
con = sqlite3.connect('.mwl-githut-data.db')
log.info('Connected to database')
cur = con.cursor()
cur.execute('select * from data')
cache = cur.fetchone()
ver = cache[6]
auto = cache[7]
updateServer = cache[12]
cur.close()
con.close()
def main(state=0):
if state == 0:
while True:
tml = input('>')
if tml != 'update':
run(tml)
else:
updateFunction()
class Update():
def latest(self):
try:
cache = get(updateServer + '/latest.json')
except Exception:
            log.warning('Couldn\'t get latest GitHub Tools version')
return ['error']
else:
cache = cache.text
cache = loads(cache)
self.latestVersion = cache['latest']
self.latestVersionLink = cache['link']
self.updateFile = cache['files']
return [self.latestVersion,self.latestVersionLink,self.updateFile]
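    # Assumed shape of the <updateServer>/latest.json payload consumed by latest()
    # above, inferred from the keys it reads; the concrete values are illustrative only:
    # {
    #     "latest": "2.1",
    #     "link": "https://example.com/githut/files/",
    #     "files": ["githut_core.py", "language_core.py"]
    # }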
update = Update
def haveField(cur,field):
try:
cur.execute('select {} from data where id=1'.format(field))
except sqlite3.OperationalError:
        log.warning('haveField check: database didn\'t have \'{}\' field'.format(field))
return False
return True
# Update
def updateFunction():
log.info('Finding latest version')
latest = update.latest(self=update)
if len(latest) == 3:
if ver != latest[0]:
tml = input(language.haveNew)
if tml in ['y','Y','n','N']:
if tml in ['y','Y']:
fileCache = []
backupDatabaseId = rdatabase()
for i in range(len(latest[2])):
try:
                        print('Getting {} file'.format(latest[2][i]))
cache = get(latest[1] + latest[2][i]).text
except Exception as e:
print(language.downloadE)
                        log.error('Couldn\'t get update')
log.exception(e)
else:
print('Got {} file'.format(latest[2][i]))
fileCache.append(cache)
rename('core','core-backup')
log.info('Renamed original core dir to backup core dir')
mkdir('core')
log.info('Made new core dir')
if 'Windows' in platform():
for i in range(len(latest[2])):
cache = 'core\\' + latest[2][i]
file = open(cache,'w')
file.write(fileCache[i])
file.close()
log.info('Make new file \'{}\''.format(latest[2][i]))
else:
for i in range(len(latest[2])):
cache = 'core/' + latest[2][i]
file = open(cache,'w')
file.write(fileCache[i])
file.close()
log.info('Make new file \'{}\''.format(latest[2][i]))
log.info('Wrote file okay')
rmtree('core-backup')
log.info('Deleted backup dir')
con = sqlite3.connect('.mwl-githut-data.db')
cur = con.cursor()
cur.execute('select * from data')
cache = cur.fetchone()
backupDatabaseText = ['1',*list(cache[1:])]
cur.close()
con.close()
remove('.mwl-githut-data.db')
cdatabase()
con = sqlite3.connect('.mwl-githut-data.db')
cur = con.cursor()
for i in range(len(backupDatabaseId)):
if haveField(cur,backupDatabaseId[i]):
                        # Bind the value as a query parameter so text fields are quoted correctly.
                        cache = 'UPDATE data SET {}=? WHERE id=1'.format(backupDatabaseId[i])
                        cur.execute(cache, (backupDatabaseText[i],))
con.commit()
cur.close()
con.close()
log.info('Updated to {}'.format(latest[0]))
print('Updated to {}'.format(latest[0]))
else:
            log.info('This version is already the latest version!')
main()
if auto == 1:
updateFunction()
main()
else:
main()
| [
"logging.basicConfig",
"json.loads",
"sqlite3.connect",
"core.language_core.cdatabase",
"core.language_core.rdatabase",
"os.rename",
"platform.platform",
"requests.get",
"logging.exception",
"core.githut_core.run",
"os.mkdir",
"shutil.rmtree",
"os.remove"
] | [((1289, 1327), 'sqlite3.connect', 'sqlite3.connect', (['""".mwl-githut-data.db"""'], {}), "('.mwl-githut-data.db')\n", (1304, 1327), False, 'import sqlite3, logging\n'), ((710, 936), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'filemode': '"""a+"""', 'filename': '"""githut-log.log"""', 'format': '"""%(asctime)s - %(name)s - %(levelname)-9s - %(filename)-8s : %(lineno)s line - %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(level=logging.DEBUG, filemode='a+', filename=\n 'githut-log.log', format=\n '%(asctime)s - %(name)s - %(levelname)-9s - %(filename)-8s : %(lineno)s line - %(message)s'\n , datefmt='%Y-%m-%d %H:%M:%S')\n", (729, 936), False, 'import sqlite3, logging\n'), ((1196, 1216), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (1213, 1216), False, 'import sqlite3, logging\n'), ((1784, 1818), 'requests.get', 'get', (["(updateServer + '/latest.json')"], {}), "(updateServer + '/latest.json')\n", (1787, 1818), False, 'from requests import get\n'), ((2007, 2019), 'json.loads', 'loads', (['cache'], {}), '(cache)\n', (2012, 2019), False, 'from json import loads\n'), ((1653, 1661), 'core.githut_core.run', 'run', (['tml'], {}), '(tml)\n', (1656, 1661), False, 'from core.githut_core import run\n'), ((2891, 2902), 'core.language_core.rdatabase', 'rdatabase', ([], {}), '()\n', (2900, 2902), False, 'from core.language_core import UpdateLanguage, cdatabase, rdatabase\n'), ((3507, 3536), 'os.rename', 'rename', (['"""core"""', '"""core-backup"""'], {}), "('core', 'core-backup')\n", (3513, 3536), False, 'from os import mkdir, rename, remove\n'), ((3633, 3646), 'os.mkdir', 'mkdir', (['"""core"""'], {}), "('core')\n", (3638, 3646), False, 'from os import mkdir, rename, remove\n'), ((4524, 4545), 'shutil.rmtree', 'rmtree', (['"""core-backup"""'], {}), "('core-backup')\n", (4530, 4545), False, 'from shutil import rmtree\n'), ((4623, 4661), 'sqlite3.connect', 'sqlite3.connect', (['""".mwl-githut-data.db"""'], {}), "('.mwl-githut-data.db')\n", (4638, 4661), False, 'import sqlite3, logging\n'), ((4946, 4975), 'os.remove', 'remove', (['""".mwl-githut-data.db"""'], {}), "('.mwl-githut-data.db')\n", (4952, 4975), False, 'from os import mkdir, rename, remove\n'), ((4996, 5007), 'core.language_core.cdatabase', 'cdatabase', ([], {}), '()\n', (5005, 5007), False, 'from core.language_core import UpdateLanguage, cdatabase, rdatabase\n'), ((5034, 5072), 'sqlite3.connect', 'sqlite3.connect', (['""".mwl-githut-data.db"""'], {}), "('.mwl-githut-data.db')\n", (5049, 5072), False, 'import sqlite3, logging\n'), ((3733, 3743), 'platform.platform', 'platform', ([], {}), '()\n', (3741, 3743), False, 'from platform import platform\n'), ((3093, 3122), 'requests.get', 'get', (['(latest[1] + latest[2][i])'], {}), '(latest[1] + latest[2][i])\n', (3096, 3122), False, 'from requests import get\n')] |
from django.db import models
from django_postgres_extensions.models.fields import HStoreField, JSONField, ArrayField
from django_postgres_extensions.models.fields.related import ArrayManyToManyField
from django import forms
from django.contrib.postgres.forms import SplitArrayField
from django_postgres_extensions.forms.fields import NestedFormField
details_fields = (
('Brand', NestedFormField(keys=('Name', 'Country'))),
('Type', forms.CharField(max_length=25, required=False)),
('Colours', SplitArrayField(base_field=forms.CharField(max_length=10, required=False), size=10)),
)
class Buyer(models.Model):
time = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=20)
def __str__(self):
return self.name
class Product(models.Model):
name = models.CharField(max_length=15)
keywords = ArrayField(models.CharField(max_length=20), default=[], form_size=10, blank=True)
sports = ArrayField(models.CharField(max_length=20),default=[], blank=True, choices=(
('football', 'Football'), ('tennis', 'Tennis'), ('golf', 'Golf'), ('basketball', 'Basketball'), ('hurling', 'Hurling'), ('baseball', 'Baseball')))
shipping = HStoreField(keys=('Address', 'City', 'Region', 'Country'), blank=True, default={})
details = JSONField(fields=details_fields, blank=True, default={})
buyers = ArrayManyToManyField(Buyer)
def __str__(self):
return self.name
@property
def country(self):
return self.shipping.get('Country', '')
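# Hedged usage sketch (not part of the original file): how the array, hstore and JSON
# fields above might be populated and queried. Field and key names come from the models
# above; the lookups are standard django.contrib.postgres behaviour and the values are
# illustrative only (kept as comments so nothing runs at import time).
#
#   ball = Product.objects.create(
#       name='Football', keywords=['outdoor', 'team'], sports=['football'],
#       shipping={'City': 'Dublin', 'Country': 'Ireland'},
#       details={'Type': 'Ball', 'Colours': ['white', 'black']})
#   Product.objects.filter(sports__contains=['football'], shipping__Country='Ireland')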
| [
"django_postgres_extensions.forms.fields.NestedFormField",
"django.forms.CharField",
"django_postgres_extensions.models.fields.related.ArrayManyToManyField",
"django_postgres_extensions.models.fields.HStoreField",
"django_postgres_extensions.models.fields.JSONField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((635, 674), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (655, 674), False, 'from django.db import models\n'), ((686, 717), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (702, 717), False, 'from django.db import models\n'), ((808, 839), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(15)'}), '(max_length=15)\n', (824, 839), False, 'from django.db import models\n'), ((1193, 1279), 'django_postgres_extensions.models.fields.HStoreField', 'HStoreField', ([], {'keys': "('Address', 'City', 'Region', 'Country')", 'blank': '(True)', 'default': '{}'}), "(keys=('Address', 'City', 'Region', 'Country'), blank=True,\n default={})\n", (1204, 1279), False, 'from django_postgres_extensions.models.fields import HStoreField, JSONField, ArrayField\n'), ((1290, 1346), 'django_postgres_extensions.models.fields.JSONField', 'JSONField', ([], {'fields': 'details_fields', 'blank': '(True)', 'default': '{}'}), '(fields=details_fields, blank=True, default={})\n', (1299, 1346), False, 'from django_postgres_extensions.models.fields import HStoreField, JSONField, ArrayField\n'), ((1361, 1388), 'django_postgres_extensions.models.fields.related.ArrayManyToManyField', 'ArrayManyToManyField', (['Buyer'], {}), '(Buyer)\n', (1381, 1388), False, 'from django_postgres_extensions.models.fields.related import ArrayManyToManyField\n'), ((384, 425), 'django_postgres_extensions.forms.fields.NestedFormField', 'NestedFormField', ([], {'keys': "('Name', 'Country')"}), "(keys=('Name', 'Country'))\n", (399, 425), False, 'from django_postgres_extensions.forms.fields import NestedFormField\n'), ((442, 488), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(25)', 'required': '(False)'}), '(max_length=25, required=False)\n', (457, 488), False, 'from django import forms\n'), ((866, 897), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (882, 897), False, 'from django.db import models\n'), ((961, 992), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (977, 992), False, 'from django.db import models\n'), ((535, 581), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(10)', 'required': '(False)'}), '(max_length=10, required=False)\n', (550, 581), False, 'from django import forms\n')] |
import os
from os.path import join
import numpy as np
from tqdm import trange
import skimage as ski
import skimage.data, skimage.transform
data_dir = '/home/kivan/datasets/Cityscapes/orig/test'
labels_dir = '/home/kivan/datasets/Cityscapes/orig/gtFine/test'
#save_dir = '/home/kivan/datasets/Cityscapes/masked/croped/test'
save_dir = '/home/kivan/datasets/Cityscapes/masked/mean/full/test'
rgb_mean = [75, 85, 75]
cities = next(os.walk(data_dir))[1]
for city in cities:
city_dir = join(data_dir, city)
image_list = next(os.walk(city_dir))[2]
print(city)
os.makedirs(join(save_dir, city), exist_ok=True)
for i in trange(len(image_list)):
img = ski.data.load(join(city_dir, image_list[i]))
    img_prefix = image_list[i][:-16]  # strip the 16-character '_leftImg8bit.png' suffix
mask_path = join(labels_dir, city, img_prefix + '_gtFine_labelIds.png')
mask_img = ski.data.load(mask_path)
#img[mask_img==1] = 0
#img[mask_img==2] = 0
#img[mask_img==3] = 0
img[mask_img==1] = rgb_mean
height = img.shape[0]
img[height-5:,...] = rgb_mean
#img[mask_img==2] = rgb_mean
##img[mask_img==3] = rgb_mean
#img = np.ascontiguousarray(img[:896,...])
save_path = join(save_dir, city, image_list[i])
ski.io.imsave(save_path, img)
| [
"skimage.data.load",
"os.path.join",
"skimage.io.imsave",
"os.walk"
] | [((488, 508), 'os.path.join', 'join', (['data_dir', 'city'], {}), '(data_dir, city)\n', (492, 508), False, 'from os.path import join\n'), ((433, 450), 'os.walk', 'os.walk', (['data_dir'], {}), '(data_dir)\n', (440, 450), False, 'import os\n'), ((579, 599), 'os.path.join', 'join', (['save_dir', 'city'], {}), '(save_dir, city)\n', (583, 599), False, 'from os.path import join\n'), ((760, 819), 'os.path.join', 'join', (['labels_dir', 'city', "(img_prefix + '_gtFine_labelIds.png')"], {}), "(labels_dir, city, img_prefix + '_gtFine_labelIds.png')\n", (764, 819), False, 'from os.path import join\n'), ((835, 859), 'skimage.data.load', 'ski.data.load', (['mask_path'], {}), '(mask_path)\n', (848, 859), True, 'import skimage as ski\n'), ((1162, 1197), 'os.path.join', 'join', (['save_dir', 'city', 'image_list[i]'], {}), '(save_dir, city, image_list[i])\n', (1166, 1197), False, 'from os.path import join\n'), ((1202, 1231), 'skimage.io.imsave', 'ski.io.imsave', (['save_path', 'img'], {}), '(save_path, img)\n', (1215, 1231), True, 'import skimage as ski\n'), ((529, 546), 'os.walk', 'os.walk', (['city_dir'], {}), '(city_dir)\n', (536, 546), False, 'import os\n'), ((676, 705), 'os.path.join', 'join', (['city_dir', 'image_list[i]'], {}), '(city_dir, image_list[i])\n', (680, 705), False, 'from os.path import join\n')] |
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
USER_CREATE_URL = reverse('users:create')
USER_DETAILS_URL = reverse('users:details_and_update')
USER_TOKEN_URL = reverse('users:token')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicUsersApiTests(TestCase):
"""Tests the users api public methods"""
def setUp(self):
self.client = APIClient()
def test_create_valid_user_success(self):
"""Tests if creating a user with valid payload is successfull"""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>',
'name': 'Big Docker Guy'
}
response = self.client.post(USER_CREATE_URL, payload)
user = get_user_model().objects.get(**response.data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', response.data)
def test_user_exists(self):
"""Tests whether a user exists"""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>'
}
create_user(**payload)
response = self.client.post(USER_CREATE_URL, payload)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""Tests if a given password is at least 6 characters long"""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>',
'name': 'Big Docker Guy'
}
response = self.client.post(USER_CREATE_URL, payload)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(user_exists)
def test_create_user_token(self):
"""Tests if a token is successfully created"""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>',
}
create_user(**payload)
response = self.client.post(USER_TOKEN_URL, {
'email': payload['email'], 'password': payload['password'],
})
self.assertIn('token', response.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_create_user_token_invalid_credentials(self):
"""Tests if token is not createad when invalid credentials are given"""
email = '<EMAIL>'
payload = {
'email': email,
'password': '<PASSWORD>',
}
create_user(email=email, password='<PASSWORD>')
response = self.client.post(USER_TOKEN_URL, payload)
self.assertNotIn('token', response.data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_user_token_missing_field(self):
"""Tests if email and password are required"""
payload_no_email = {'email': '', 'password': '<PASSWORD>'}
response_no_email = self.client.post(USER_TOKEN_URL, payload_no_email)
payload_no_password = {'email': '<EMAIL>', 'password': ''}
response_no_password = self.client.post(
USER_TOKEN_URL, payload_no_password
)
self.assertNotIn('token', response_no_email.data)
self.assertEqual(
response_no_email.status_code, status.HTTP_400_BAD_REQUEST
)
self.assertNotIn('token', response_no_password.data)
self.assertEqual(
response_no_password.status_code, status.HTTP_400_BAD_REQUEST
)
def test_create_user_token_user_not_found(self):
"""Tests if token is not created to a non-existent user"""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>'
}
response = self.client.post(USER_TOKEN_URL, payload)
self.assertNotIn('token', response.data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_authentication_is_required_for_user_details(self):
"""Tests if authentication is required for fetching the user details"""
response = self.client.get(USER_DETAILS_URL)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUsersApiTests(TestCase):
"""Tests the users api private methods"""
def setUp(self):
self.user = create_user(
email='<EMAIL>',
password='<PASSWORD>',
name='Big Dock Guy'
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
    def test_access_user_details_successful(self):
"""Tests fetching the logged in user's detail"""
response = self.client.get(USER_DETAILS_URL)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {
'name': self.user.name,
'email': self.user.email
})
def test_post_to_user_details_not_allowed(self):
"""Tests if the POST method is not allowed on the user details url"""
response = self.client.post(USER_DETAILS_URL)
self.assertEqual(
response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED
)
    def test_update_user_successful(self):
"""Tests if updating the user is working"""
payload = {
'name': 'New Big Dock Guy',
'password': '<PASSWORD>'
}
response = self.client.patch(USER_DETAILS_URL, payload)
self.user.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
| [
"rest_framework.test.APIClient",
"django.contrib.auth.get_user_model",
"django.urls.reverse"
] | [((209, 232), 'django.urls.reverse', 'reverse', (['"""users:create"""'], {}), "('users:create')\n", (216, 232), False, 'from django.urls import reverse\n'), ((252, 287), 'django.urls.reverse', 'reverse', (['"""users:details_and_update"""'], {}), "('users:details_and_update')\n", (259, 287), False, 'from django.urls import reverse\n'), ((305, 327), 'django.urls.reverse', 'reverse', (['"""users:token"""'], {}), "('users:token')\n", (312, 327), False, 'from django.urls import reverse\n'), ((543, 554), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (552, 554), False, 'from rest_framework.test import APIClient\n'), ((4709, 4720), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (4718, 4720), False, 'from rest_framework.test import APIClient\n'), ((368, 384), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (382, 384), False, 'from django.contrib.auth import get_user_model\n'), ((890, 906), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (904, 906), False, 'from django.contrib.auth import get_user_model\n'), ((1804, 1820), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1818, 1820), False, 'from django.contrib.auth import get_user_model\n')] |
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
# 16G, 16P
# Premium = exp(c + a1 * I(distr_dir) + a2 * I(qpt<=3) + a3 * I(legal)) * (1 + plia_acc) * Insured_Amount_adj(IAs)
# 29B, 29K
# Premium = exp(c + a1 * I(distr_dir) + a2 * I(qpt<=3) + a3 * I(legal)) * Insured_Amount_adj(IAs)
# 05N, 09@
# Premium = exp(c + a1 * I(distr_dir) + a2 * I4(year_label) + a3 * I32(thf_factor)) * IA3 * f(Coverage)
# 20K
# Premium = exp(c + a1 * I(distr_dir) + a2 * (I(legal))
# 18@
# Premium = exp(c + a1 * I(distr_dir)) * Insured_Amount_adj(IAs)
# 04M, 05E(partially explained)
# Premium = exp(c + a1 * I(distr_dir) + a2 * f(vehicle_code)) * (age_fac * sex_fac - pdmg_acc) * IA3 * f(Coverage)
# Rest Not explained
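# Illustrative sketch (not part of the original analysis, numbers made up): for the
# group-1 coverages (16G, 16P) the observed premium carries the liability accident
# loading (1 + plia_acc), so dividing it out recovers the accident-free premium. This
# is exactly the row-wise adjustment get_id_aggregated_coverage() applies below.
def _example_group1_acc_adjustment(premium=1200.0, plia_acc=0.2):
    # 1200 / (1 + 0.2) -> 1000.0
    return premium / (1 + plia_acc)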
# Intended top-level usage; commented out because calling it here, before the
# function is defined below, would raise a NameError at import time:
# df_coverage = get_id_aggregated_coverage(df_policy)
def get_id_aggregated_coverage(df_policy):
'''
In:
DataFrame(df_policy),
Out:
DataFrame(agg_coverage),
Description:
get acc adjusted premium
'''
coverage_all = list(df_policy['Insurance_Coverage'].value_counts().index)
# group 1:
coverage_g1 = ['16G', '16P']
df_g1 = df_policy[df_policy['Insurance_Coverage'].isin(coverage_g1)]
df_g1['premium_acc_adj'] = df_g1['Premium'] / (1 + df_g1['plia_acc'])
def get_age_label_id(ibirth):
if pd.isnull(ibirth):
return 1
else:
age = 2015 - int(ibirth[3:]);
if age < 25:
return 1.74
elif age < 30:
return 1.15
elif age < 60:
return 1
elif age < 70:
return 1.07
else:
return 1.07
# group 2:
coverage_g2 = ['04M', '05E', '55J']
df_g2 = df_policy[df_policy['Insurance_Coverage'].isin(coverage_g2)]
g2_age_fac = df_g2['ibirth'].map(get_age_label_id)
g2_sex_fac = df_g2['fsex'].map(lambda x: 0.9 if x == '2' else 1)
df_g2['premium_acc_adj'] = df_g2['Premium'] / (g2_age_fac * g2_sex_fac + df_g2['pdmg_acc'])
# group 3:
#coverage_g3 = ['29B', '29K', '5N', '20K', '18@', '09@', '12L', '15F']
coverage_g3 = [cov for cov in coverage_all if ((cov not in coverage_g1) & (cov not in coverage_g2))]
df_g3 = df_policy[df_policy['Insurance_Coverage'].isin(coverage_g3)]
df_g3['premium_acc_adj'] = df_g3['Premium']
df_coverage = pd.concat([df_g1, df_g2, df_g3])
# aggregate coverage
    # keys are the original Chinese coverage-group labels:
    # 車損 = vehicle damage, 竊盜 = theft, 車責 = liability
    map_agg_premium = {'車損': 'Dmg',
                       '竊盜': 'Thf',
                       '車責': 'Lia'}
# 1. group premium by Main_Insurance_Coverage_Group
agg_premium = df_coverage[['Main_Insurance_Coverage_Group', 'premium_acc_adj']]
agg_premium['Main_Insurance_Coverage_Group'] = agg_premium['Main_Insurance_Coverage_Group'].map(map_agg_premium)
agg_premium = agg_premium.set_index(['Main_Insurance_Coverage_Group'], append = True)
agg_premium = agg_premium.groupby(level=[0,1]).agg({'premium_acc_adj': np.sum})
# 2. aggregate at policy level
agg_premium = agg_premium.unstack(level=1)
agg_premium.columns = ['cpremium_dmg_acc_adj', 'cpremium_lia_acc_adj', 'cpremium_acc_adj_thf']
return(agg_premium[['cpremium_dmg_acc_adj', 'cpremium_lia_acc_adj']]) | [
"pandas.isnull",
"pandas.concat"
] | [((2347, 2379), 'pandas.concat', 'pd.concat', (['[df_g1, df_g2, df_g3]'], {}), '([df_g1, df_g2, df_g3])\n', (2356, 2379), True, 'import pandas as pd\n'), ((1304, 1321), 'pandas.isnull', 'pd.isnull', (['ibirth'], {}), '(ibirth)\n', (1313, 1321), True, 'import pandas as pd\n')] |
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.special import logsumexp
from epidag.monitor import Monitor
from epidag.fitting import BayesianModel
from epidag.bayesnet import Chromosome
__author__ = 'TimeWz667'
__all__ = ['Fitter']
class Fitter(metaclass=ABCMeta):
def __init__(self, name_logger, **kwargs):
self.Monitor = Monitor(name_logger)
self.Parameters = dict(kwargs)
def needs_exact_likelihood(self):
return False
def is_updatable(self):
return False
def set_log_path(self, filename):
self.Monitor.set_log_path(filename=filename)
def info(self, msg):
self.Monitor.info(msg)
def error(self, msg):
self.Monitor.info(msg)
@abstractmethod
def fit(self, model: BayesianModel, **kwargs):
pass
def update(self, res, **kwargs):
if not self.is_updatable():
raise AttributeError('No update scheme available')
# Fitter1 builds on Fitter so that initialise_prior below can use self.info/self.error.
class Fitter1(Fitter, metaclass=ABCMeta):
DefaultParameters = {
'n_population': 1000,
'm_prior_drop': 10,
'target': 'MLE'
}
def __init__(self, model: BayesianModel, **kwargs):
self.Model = model
self.Monitor = Monitor(model.BN.Name)
self.Parameters = dict(self.DefaultParameters)
self.update_parameters(**kwargs)
self.Prior = list()
self.Posterior = list()
def __getitem__(self, item):
return self.Parameters[item]
def update_parameters(self, **kwargs):
new = {k: v for k, v in kwargs.items() if k in self.Parameters}
self.Parameters.update(new)
def renew_parameters(self):
self.Parameters = dict(self.DefaultParameters)
def initialise_prior(self, n: int=0):
self.Prior.clear()
if n <= 0:
n = self['n_population']
m = self['m_prior_drop'] * n
drop = 0
while len(self.Prior) < n:
p = self.Model.sample_prior()
li = self.Model.evaluate_likelihood(p)
if np.isfinite(li):
self.Prior.append(p)
else:
drop += 1
if drop >= m:
self.error('Too many infinite likelihood in the space')
raise AttributeError('Parameter space might not well-defined')
pr_drop = drop/(len(self.Prior) + drop) * 100
self.info('Prior parameters generated with {:.1f}% dropouts'.format(pr_drop))
def prior_to_df(self):
return Chromosome.to_data_frame(self.Prior)
def summarise_prior(self):
print(self.prior_to_df().describe())
def prior_to_json(self, file):
df = self.posterior_to_df()
df.to_json(file, orient='records')
def prior_to_csv(self, file):
df = self.posterior_to_df()
df.to_csv(file)
def posterior_to_df(self):
return Chromosome.to_data_frame(self.Posterior)
def summarise_posterior(self):
print(self.posterior_to_df().describe())
def posterior_to_json(self, file):
df = self.posterior_to_df()
df.to_json(file, orient='records')
def posterior_to_csv(self, file):
df = self.posterior_to_df()
df.to_csv(file)
class EvolutionaryFitter(Fitter1, metaclass=ABCMeta):
    # Extends Fitter1 (rather than the bare Fitter): the methods below rely on its
    # Model, Parameters ('target', 'n_population') and prior/posterior handling.
    DefaultParameters = dict(Fitter1.DefaultParameters)
DefaultParameters['max_generation'] = 30
DefaultParameters['n_update'] = 10
def __init__(self, model, **kwargs):
        Fitter1.__init__(self, model, **kwargs)
self.BestFit = self.Model.sample_prior()
self.Generation = 0
self.Stay = 0
def find_best(self):
if self.Parameters['target'] == 'MLE':
key = lambda x: x.LogLikelihood
x0 = self.BestFit.LogLikelihood
else:
key = lambda x: x.LogPosterior
x0 = self.BestFit.LogPosterior
if self.Posterior:
self.BestFit = max(self.Posterior, key=key)
else:
self.error('No parameters found')
if self.Parameters['target'] == 'MLE':
x1 = self.BestFit.LogLikelihood
else:
x1 = self.BestFit.LogPosterior
if x1 == x0:
self.Stay += 1
else:
self.Stay = 0
def fit(self, **kwargs):
if kwargs:
self.update_parameters(**kwargs)
self.info('Parameters updated')
mg = self.Parameters['max_generation']
self.genesis()
for i in range(mg):
self.Generation += 1
self.step()
self.find_best()
if self.termination():
self.info('Termination criteria reached')
break
            if self.Generation >= mg:
                self.info('Max generation reached')
                break
self.info('Fitting completed')
@abstractmethod
def step(self):
pass
def genesis(self):
self.Generation = 0
self.Stay = 0
self.info('Genesis')
self.initialise_prior()
self.Posterior = list(self.Prior)
self.find_best()
@abstractmethod
def termination(self):
pass
def keep_record(self):
if self.Parameters['target'] == 'MLE':
ma = self.BestFit.LogLikelihood
vs = [p.LogLikelihood for p in self.Posterior]
else:
ma = self.BestFit.LogPosterior
vs = [p.LogPosterior for p in self.Posterior]
self.Monitor.keep(
Max=ma,
Mean=logsumexp(vs),
Stay=self.Stay
)
self.Monitor.step(self.Generation)
def update(self, **kwargs):
self.update_parameters(**kwargs)
self.info('Parameters updated')
n_update = self.Parameters['n_update']
gen = self.Generation + n_update
for i in range(n_update):
self.Generation += 1
self.step()
self.find_best()
            if self.Generation >= gen:
                self.info('Max generation reached')
                break
self.info('Update completed')
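# Minimal usage sketch (assumes a concrete subclass implementing step()/termination()
# and an already-built epidag BayesianModel `bm`; the names below are illustrative only):
#
#     fitter = MyEvolutionaryFitter(bm, n_population=500, target='MLE')
#     fitter.fit(max_generation=20)
#     fitter.summarise_posterior()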
| [
"epidag.bayesnet.Chromosome.to_data_frame",
"numpy.isfinite",
"scipy.special.logsumexp",
"epidag.monitor.Monitor"
] | [((362, 382), 'epidag.monitor.Monitor', 'Monitor', (['name_logger'], {}), '(name_logger)\n', (369, 382), False, 'from epidag.monitor import Monitor\n'), ((1220, 1242), 'epidag.monitor.Monitor', 'Monitor', (['model.BN.Name'], {}), '(model.BN.Name)\n', (1227, 1242), False, 'from epidag.monitor import Monitor\n'), ((2507, 2543), 'epidag.bayesnet.Chromosome.to_data_frame', 'Chromosome.to_data_frame', (['self.Prior'], {}), '(self.Prior)\n', (2531, 2543), False, 'from epidag.bayesnet import Chromosome\n'), ((2878, 2918), 'epidag.bayesnet.Chromosome.to_data_frame', 'Chromosome.to_data_frame', (['self.Posterior'], {}), '(self.Posterior)\n', (2902, 2918), False, 'from epidag.bayesnet import Chromosome\n'), ((2036, 2051), 'numpy.isfinite', 'np.isfinite', (['li'], {}), '(li)\n', (2047, 2051), True, 'import numpy as np\n'), ((5489, 5502), 'scipy.special.logsumexp', 'logsumexp', (['vs'], {}), '(vs)\n', (5498, 5502), False, 'from scipy.special import logsumexp\n')] |
import pygame
from components import Btn, ImageBtn, Text, Line, Image, Stage
from music import RenderedScore
#This class plays out the game mode
class GameScore(RenderedScore):
"""
[__init__ self note_imgs player key_input score] creates a new
game mode using the specified
[note_imgs] a NoteImgCache object
[player] an AudioPlayer object
[key_input] either a KeyboardInput / BtnInput
[score] a Score object
"""
def __init__(self, note_imgs, player, key_input, score = None):
super().__init__(note_imgs, player, score)
#Leave previous notes as is
self.mark_black = False
#Don't play notes on advance
self.play_notes = False
#Various parameters
#tolerance of 0.2 notes
self.early_tolerance = 0.2
self.colors['green'] = (14, 230, 71)
self.colors['red'] = (224, 9, 9)
#Take in key inputs
self.key_input = key_input
self.playable_pitches = key_input.get_playable_pitches()
#Set of currently played pitches
self.played_pitches = set()
#Various parameters
self.early_notes = 0
self.wrong_notes = 0
self.frames_used = 0
#FSM State
self.WAITING = 0
self.PLAYING = 1
self.fsm_state = self.WAITING
self.quit = False
#Construct buttons
self.exit_btn = Btn("Exit", (40, 200), on_click = \
self.on_exit_btn_click)
self.stage.add_btn(self.exit_btn)
#[on_exit_btn_click self btn pos] is called when the exit
    #button is clicked
def on_exit_btn_click(self, btn, pos):
self.quit = True
info = self.parent_screen.get_info()
#Remove these information from info
remove_elems = ["early_notes", "wrong_notes", "frames_used"]
for elem in remove_elems:
if elem in info:
info.pop(elem)
#[on_early_note_release self timing_jump] is triggered when a note is
#released early by the player. [timing_jump] is the amount of crotchets
#missed when we jump to the next note.
def on_early_note_release(self, timing_jump):
if timing_jump >= self.early_tolerance:
#print("Early Note!!")
self.early_notes += 1
#Stop all non playable notes
#[on_note_stop self pitches treble] is called when we transition
#between bars or between notes. [pitches] refer to the pitches which
#are stopped and [treble] refers to the clef (Treble if True, Bass if False)
def on_note_stop(self, pitches, treble):
#Change to waiting state
self.fsm_state = self.WAITING
#print("Stopping")
#Discard all played pitches
for pitch in pitches:
if pitch in self.played_pitches:
self.played_pitches.remove(pitch)
#Stop all non playable notes
if pitch not in self.playable_pitches:
self.player.stop_note([pitch])
#[advance_time self fps] steps through one frame at [fps] frames
#per second. This causes the playback to advance according to
#[self.advance_rate]
def advance_time(self, fps):
self.frames_used += 1
#Consume updates from input
#Use input directly when paused
#Get current pitches
if self.has_quit():
return
treble_pitches = self.get_curr_pitches(True)
bass_pitches = self.get_curr_pitches(False)
expected_pitches = treble_pitches + bass_pitches
self.key_input.poll()
updates = self.key_input.get_updates()
for pitch, is_pressed in updates.items():
#print("Update: {}, {}".format(pitch, is_pressed))
if is_pressed:
self.played_pitches.add(pitch)
self.player.play_note([pitch])
if pitch not in expected_pitches:
#print("Wrong Note!!")
self.wrong_notes += 1
elif pitch in self.played_pitches:
self.played_pitches.remove(pitch)
self.player.stop_note([pitch])
else:
self.player.stop_note([pitch])
corr_pitches = set()
missing_pitches = set()
extra_pitches = set()
unplayable_pitches = set()
for pitch in expected_pitches:
if pitch not in self.playable_pitches:
corr_pitches.add(pitch)
unplayable_pitches.add(pitch)
elif pitch in self.played_pitches:
corr_pitches.add(pitch)
else:
missing_pitches.add(pitch)
for pitch in self.played_pitches:
if pitch not in expected_pitches:
extra_pitches.add(pitch)
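        # FSM summary: WAITING -> PLAYING once every expected pitch is either held by the
        # player or not playable on this input device, with no extra pitches held;
        # PLAYING -> WAITING (counted as an early note release) when a required pitch goes missing.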
#transition into playing state if every note is good
self.change_curr_pitch_color(corr_pitches, self.colors['green'])
self.change_curr_pitch_color(missing_pitches, self.colors['red'])
if self.fsm_state == self.WAITING and len(missing_pitches) == 0 \
and len(extra_pitches) == 0:
self.fsm_state = self.PLAYING
#Play any non playable notes
for pitch in unplayable_pitches:
self.player.play_note([pitch])
elif self.fsm_state == self.PLAYING and len(missing_pitches) != 0:
#print("Early Stop!!")
self.early_notes += 1
self.fsm_state = self.WAITING
#Stop unplayable pitches
for pitch in unplayable_pitches:
self.player.stop_note([pitch])
self.jump_to_next_timing()
super().advance_time(fps)
elif self.fsm_state == self.PLAYING:
super().advance_time(fps)
if super().has_quit():
info = self.parent_screen.get_info()
info["early_notes"] = self.early_notes
info["wrong_notes"] = self.wrong_notes
info["frames_used"] = self.frames_used
#[has_quit self] queries whether this score has quitted
def has_quit(self):
if super().has_quit():
return True
else:
return self.quit | [
"components.Btn"
] | [((1206, 1261), 'components.Btn', 'Btn', (['"""Exit"""', '(40, 200)'], {'on_click': 'self.on_exit_btn_click'}), "('Exit', (40, 200), on_click=self.on_exit_btn_click)\n", (1209, 1261), False, 'from components import Btn, ImageBtn, Text, Line, Image, Stage\n')] |
"""
PPA Step-2: mini_batch K-means on ending points
Author: <NAME> (<EMAIL>)
Last update: 2020-04-04
"""
import numpy as np
import os
import sys
import fnmatch
from scipy.io import loadmat
from sklearn.cluster import MiniBatchKMeans
"""
All required libraries are imported above.
"""
home_dir = './'
input_dir = home_dir + 'ending_point'
output_dir = home_dir + 'clustering'
if not os.path.exists(output_dir):
os.mkdir(output_dir)
n_clusters = int(sys.argv[1])
# screen all fiber tracking files
sub_id_list = np.loadtxt(home_dir + 'subjects_0.txt')
n = len(sub_id_list)
n_tracts = np.loadtxt(input_dir + '/' + 'n_tracts.txt')
n_tract_all = int(np.sum(n_tracts))
# print(n_tract_all)
end_points_all = np.zeros(shape=(n_tract_all, 6))
print('load the ending point file \n')
idx_1 = 0
idx_2 = n_tracts[0]
for k in range(n):
file_list = fnmatch.filter(os.listdir(input_dir), str(int(sub_id_list[k])) + '*.mat')
file_name = file_list[0]
file_path = input_dir + '/' + file_name
# sub_id = file_name[:-4]
mat = loadmat(file_path)
end_points = mat['end_points']
# print(end_points.shape)
end_points_all[int(idx_1):int(idx_2), :] = end_points
    # only advance the slice indices while another subject remains
    if k < n - 1:
        idx_1 = idx_1 + n_tracts[k]
        idx_2 = idx_2 + n_tracts[k + 1]
del end_points
kmeans = MiniBatchKMeans(n_clusters=n_clusters, batch_size=1000)
# Fitting the input data
kmeans = kmeans.fit(end_points_all)
labels = kmeans.predict(end_points_all)
cluster_means = kmeans.cluster_centers_
w = np.zeros(shape=(n, n_clusters))
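# w[i, j] = fraction of subject i's tracts whose ending points were assigned to cluster j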
idx_1 = 0
idx_2 = n_tracts[0]
labels_i = labels[int(idx_1):int(idx_2)]
for j in range(n_clusters):
w[0, j] = np.mean(1*(labels_i == j))
for i in range(n-1):
idx_1 = idx_1 + n_tracts[i]
idx_2 = idx_2 + n_tracts[i + 1]
labels_i = labels[int(idx_1):int(idx_2)]
for j in range(n_clusters):
w[i+1, j] = np.mean(1*(labels_i == j))
np.savetxt(output_dir + '/' + 'cluster_w_' + str(n_clusters) + '.txt', w)
np.savetxt(output_dir + '/' + 'cluster_label_' + str(n_clusters) + '.txt', labels)
np.savetxt(output_dir + '/' + 'cluster_means_' + str(n_clusters) + '.txt', cluster_means)
| [
"os.path.exists",
"numpy.mean",
"os.listdir",
"sklearn.cluster.MiniBatchKMeans",
"scipy.io.loadmat",
"numpy.sum",
"numpy.zeros",
"os.mkdir",
"numpy.loadtxt"
] | [((509, 548), 'numpy.loadtxt', 'np.loadtxt', (["(home_dir + 'subjects_0.txt')"], {}), "(home_dir + 'subjects_0.txt')\n", (519, 548), True, 'import numpy as np\n'), ((582, 626), 'numpy.loadtxt', 'np.loadtxt', (["(input_dir + '/' + 'n_tracts.txt')"], {}), "(input_dir + '/' + 'n_tracts.txt')\n", (592, 626), True, 'import numpy as np\n'), ((701, 733), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_tract_all, 6)'}), '(shape=(n_tract_all, 6))\n', (709, 733), True, 'import numpy as np\n'), ((1268, 1323), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'n_clusters', 'batch_size': '(1000)'}), '(n_clusters=n_clusters, batch_size=1000)\n', (1283, 1323), False, 'from sklearn.cluster import MiniBatchKMeans\n'), ((1470, 1501), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, n_clusters)'}), '(shape=(n, n_clusters))\n', (1478, 1501), True, 'import numpy as np\n'), ((377, 403), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (391, 403), False, 'import os\n'), ((409, 429), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (417, 429), False, 'import os\n'), ((645, 661), 'numpy.sum', 'np.sum', (['n_tracts'], {}), '(n_tracts)\n', (651, 661), True, 'import numpy as np\n'), ((1029, 1047), 'scipy.io.loadmat', 'loadmat', (['file_path'], {}), '(file_path)\n', (1036, 1047), False, 'from scipy.io import loadmat\n'), ((1615, 1643), 'numpy.mean', 'np.mean', (['(1 * (labels_i == j))'], {}), '(1 * (labels_i == j))\n', (1622, 1643), True, 'import numpy as np\n'), ((856, 877), 'os.listdir', 'os.listdir', (['input_dir'], {}), '(input_dir)\n', (866, 877), False, 'import os\n'), ((1828, 1856), 'numpy.mean', 'np.mean', (['(1 * (labels_i == j))'], {}), '(1 * (labels_i == j))\n', (1835, 1856), True, 'import numpy as np\n')] |
""" Break raw data into attributes """
import re
from typing import Any
from src.parse.parsers.header import HeaderParser
from src.parse.parsers.charity import CharityParser
from src.parse.parsers.income import IncomeParser
from src.parse.parsers.asset import AssetParser
from src.parse.parsers.ptr import PTRParser
from src.parse.parsers.transaction import TransactionParser
from src.parse.parsers.gift import GiftParser
from src.parse.parsers.travel import TravelParser
from src.parse.parsers.liability import LiabilityParser
from src.parse.parsers.position import PositionParser
from src.parse.parsers.agreement import AgreementParser
from src.parse.parsers.compensation import CompensationParser
from src.parse.parsers.comment import CommentParser
RAW_DOCUMENT_EXPRESSION = r'view/(.*?)/(?:regular/)?(.*?)/".*?>(.*?)</a>'
class Parse: # pylint: disable=too-many-instance-attributes
""" Given text, produce attributes """
def __init__(self) -> None:
self.re_document_link = None
self.header_parser = HeaderParser()
self.charity_parser = CharityParser()
self.income_parser = IncomeParser()
self.asset_parser = AssetParser()
self.ptr_parser = PTRParser()
self.transaction_parser = TransactionParser()
self.gift_parser = GiftParser()
self.travel_parser = TravelParser()
self.liability_parser = LiabilityParser()
self.position_parser = PositionParser()
self.agreement_parser = AgreementParser()
self.compensation_parser = CompensationParser()
self.comment_parser = CommentParser()
def parse_header(self, key: int, text: str):
return self.header_parser.parse(key, text)
def parse_charity(self, key: int, text: str):
return self.charity_parser.parse(key, text)
def parse_income(self, key: int, text: str):
return self.income_parser.parse(key, text)
def parse_asset(self, key: int, text: str):
return self.asset_parser.parse(key, text)
def parse_ptr(self, key: int, text: str):
return self.ptr_parser.parse(key, text)
def parse_transaction(self, key: int, text: str):
return self.transaction_parser.parse(key, text)
def parse_gift(self, key: int, text: str):
return self.gift_parser.parse(key, text)
def parse_travel(self, key: int, text: str):
return self.travel_parser.parse(key, text)
def parse_liability(self, key: int, text: str):
return self.liability_parser.parse(key, text)
def parse_position(self, key: int, text: str):
return self.position_parser.parse(key, text)
def parse_agreement(self, key: int, text: str):
return self.agreement_parser.parse(key, text)
def parse_compensation(self, key: int, text: str):
return self.compensation_parser.parse(key, text)
def parse_comment(self, key: int, text: str):
return self.comment_parser.parse(key, text)
def __document_link_regex(self):
""" Produce a compiled regular expression """
if not self.re_document_link:
self.re_document_link = re.compile(RAW_DOCUMENT_EXPRESSION)
return self.re_document_link
def document_link_parse(self, document_link: str) -> Any:
""" Break document link into the underlying data
Args:
document_link: str - A web link with a title
Returns:
(document_type, document_id, document_name)
"""
pattern = self.__document_link_regex()
match = pattern.search(document_link)
return match.groups()
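# Illustrative example (hypothetical link of the expected shape, not taken from real data):
#     Parse().document_link_parse('<a href="https://host/view/annual/12345/">2016 Report</a>')
#     -> ('annual', '12345', '2016 Report')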
| [
"src.parse.parsers.header.HeaderParser",
"src.parse.parsers.comment.CommentParser",
"src.parse.parsers.travel.TravelParser",
"src.parse.parsers.liability.LiabilityParser",
"src.parse.parsers.position.PositionParser",
"src.parse.parsers.agreement.AgreementParser",
"src.parse.parsers.compensation.CompensationParser",
"re.compile",
"src.parse.parsers.gift.GiftParser",
"src.parse.parsers.income.IncomeParser",
"src.parse.parsers.ptr.PTRParser",
"src.parse.parsers.charity.CharityParser",
"src.parse.parsers.transaction.TransactionParser",
"src.parse.parsers.asset.AssetParser"
] | [((1033, 1047), 'src.parse.parsers.header.HeaderParser', 'HeaderParser', ([], {}), '()\n', (1045, 1047), False, 'from src.parse.parsers.header import HeaderParser\n'), ((1078, 1093), 'src.parse.parsers.charity.CharityParser', 'CharityParser', ([], {}), '()\n', (1091, 1093), False, 'from src.parse.parsers.charity import CharityParser\n'), ((1123, 1137), 'src.parse.parsers.income.IncomeParser', 'IncomeParser', ([], {}), '()\n', (1135, 1137), False, 'from src.parse.parsers.income import IncomeParser\n'), ((1166, 1179), 'src.parse.parsers.asset.AssetParser', 'AssetParser', ([], {}), '()\n', (1177, 1179), False, 'from src.parse.parsers.asset import AssetParser\n'), ((1206, 1217), 'src.parse.parsers.ptr.PTRParser', 'PTRParser', ([], {}), '()\n', (1215, 1217), False, 'from src.parse.parsers.ptr import PTRParser\n'), ((1252, 1271), 'src.parse.parsers.transaction.TransactionParser', 'TransactionParser', ([], {}), '()\n', (1269, 1271), False, 'from src.parse.parsers.transaction import TransactionParser\n'), ((1299, 1311), 'src.parse.parsers.gift.GiftParser', 'GiftParser', ([], {}), '()\n', (1309, 1311), False, 'from src.parse.parsers.gift import GiftParser\n'), ((1341, 1355), 'src.parse.parsers.travel.TravelParser', 'TravelParser', ([], {}), '()\n', (1353, 1355), False, 'from src.parse.parsers.travel import TravelParser\n'), ((1388, 1405), 'src.parse.parsers.liability.LiabilityParser', 'LiabilityParser', ([], {}), '()\n', (1403, 1405), False, 'from src.parse.parsers.liability import LiabilityParser\n'), ((1437, 1453), 'src.parse.parsers.position.PositionParser', 'PositionParser', ([], {}), '()\n', (1451, 1453), False, 'from src.parse.parsers.position import PositionParser\n'), ((1486, 1503), 'src.parse.parsers.agreement.AgreementParser', 'AgreementParser', ([], {}), '()\n', (1501, 1503), False, 'from src.parse.parsers.agreement import AgreementParser\n'), ((1539, 1559), 'src.parse.parsers.compensation.CompensationParser', 'CompensationParser', ([], {}), '()\n', (1557, 1559), False, 'from src.parse.parsers.compensation import CompensationParser\n'), ((1590, 1605), 'src.parse.parsers.comment.CommentParser', 'CommentParser', ([], {}), '()\n', (1603, 1605), False, 'from src.parse.parsers.comment import CommentParser\n'), ((3115, 3150), 're.compile', 're.compile', (['RAW_DOCUMENT_EXPRESSION'], {}), '(RAW_DOCUMENT_EXPRESSION)\n', (3125, 3150), False, 'import re\n')] |
from __future__ import annotations
from prettyqt import core, widgets
from prettyqt.qt import QtWidgets
class CollapsibleFrame(widgets.Frame):
expanded = core.Signal()
collapsed = core.Signal()
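    # emitted from on_button_click after the panel has been shown or hidden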
def __init__(self, parent: QtWidgets.QWidget | None = None):
super().__init__(parent)
self.set_frame_shape("styled_panel")
self.set_frame_shadow("plain")
# layout
self._layout = widgets.BoxLayout("vertical")
self._layout.set_margin(0)
self._layout.setSpacing(0)
self.setLayout(self._layout)
# button
self._button = widgets.ToolButton(self)
self._button.set_arrow_type("right")
self._button.set_style("text_beside_icon")
self._button.setAutoRaise(False)
self._button.set_text("CollapsibleFrame")
self.set_size_policy("minimum_expanding", "fixed")
self._layout.addWidget(self._button, 0)
self._button.setVisible(True)
# group box
self._panel = widgets.Widget(self)
self._layout.addWidget(self._panel)
self._panel.setVisible(False)
self._panel_layout = widgets.BoxLayout("vertical")
self._panel_layout.set_margin(1)
self._panel_layout.setSpacing(2)
self._panel.setLayout(self._panel_layout)
# connect signals
self._button.clicked.connect(self.on_button_click)
# private state variables
self._is_collapsed = True
def set_title(self, title: str):
self._button.set_text(title)
def add_widget(self, widget: widgets.Widget):
self._panel_layout.addWidget(widget)
def remove_widget(self, widget: widgets.Widget):
self._panel_layout.removeWidget(widget)
def is_expanded(self) -> bool:
return not self._is_collapsed
def expand(self):
self._button.set_arrow_type("down")
self._panel.setVisible(True)
self._is_collapsed = False
self.set_size_policy("minimum_expanding", "minimum_expanding")
def collapse(self):
self._panel.setVisible(False)
self._button.set_arrow_type("right")
self._is_collapsed = True
self.set_size_policy("preferred", "preferred")
@core.Slot()
def on_button_click(self):
if self._is_collapsed:
self.expand()
self.expanded.emit()
else:
self.collapse()
self.collapsed.emit()
if __name__ == "__main__":
app = widgets.app()
widget = CollapsibleFrame()
widget.add_widget(widgets.Label("test"))
widget.show()
app.main_loop()
| [
"prettyqt.core.Signal",
"prettyqt.widgets.Widget",
"prettyqt.core.Slot",
"prettyqt.widgets.BoxLayout",
"prettyqt.widgets.app",
"prettyqt.widgets.Label",
"prettyqt.widgets.ToolButton"
] | [((162, 175), 'prettyqt.core.Signal', 'core.Signal', ([], {}), '()\n', (173, 175), False, 'from prettyqt import core, widgets\n'), ((192, 205), 'prettyqt.core.Signal', 'core.Signal', ([], {}), '()\n', (203, 205), False, 'from prettyqt import core, widgets\n'), ((2212, 2223), 'prettyqt.core.Slot', 'core.Slot', ([], {}), '()\n', (2221, 2223), False, 'from prettyqt import core, widgets\n'), ((2460, 2473), 'prettyqt.widgets.app', 'widgets.app', ([], {}), '()\n', (2471, 2473), False, 'from prettyqt import core, widgets\n'), ((429, 458), 'prettyqt.widgets.BoxLayout', 'widgets.BoxLayout', (['"""vertical"""'], {}), "('vertical')\n", (446, 458), False, 'from prettyqt import core, widgets\n'), ((606, 630), 'prettyqt.widgets.ToolButton', 'widgets.ToolButton', (['self'], {}), '(self)\n', (624, 630), False, 'from prettyqt import core, widgets\n'), ((1005, 1025), 'prettyqt.widgets.Widget', 'widgets.Widget', (['self'], {}), '(self)\n', (1019, 1025), False, 'from prettyqt import core, widgets\n'), ((1137, 1166), 'prettyqt.widgets.BoxLayout', 'widgets.BoxLayout', (['"""vertical"""'], {}), "('vertical')\n", (1154, 1166), False, 'from prettyqt import core, widgets\n'), ((2528, 2549), 'prettyqt.widgets.Label', 'widgets.Label', (['"""test"""'], {}), "('test')\n", (2541, 2549), False, 'from prettyqt import core, widgets\n')] |
"""
-------------------------------------------------------
verif_answers.py
verifies questions and answers
-------------------------------------------------------
Author: <NAME>
ID: #########
Email: <EMAIL>
__updated__ = "2016-04-15"
-------------------------------------------------------
"""
from Main import create_questions
generated_questions = create_questions()
run = True
while run:
question_cont = input('Question Text: ')
for k in generated_questions:
if question_cont in k.question_text:
print(k)
print("______End of Results_______")
print()
print("Next Question")
print() | [
"Main.create_questions"
] | [((361, 379), 'Main.create_questions', 'create_questions', ([], {}), '()\n', (377, 379), False, 'from Main import create_questions\n')] |
# Author: <NAME>
from Tkinter import *
import math
import random
'''
F = ma
'''
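# Integration scheme (as implemented below): applyforce() adds a = F/m to the velocity
# once per call, and next() adds the velocity to the position once per tick -
# a simple explicit-Euler step with the time step folded into the force/velocity units.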
SCREEN_SIZE = [1360,768]
WINDOW_SIZE = [500,500]
TICKER = 30
CIRCLE = 'circle'
RECTANGLE = 'rectangle'
MODE = RECTANGLE # circle | rectangle
def centralize():
return '%dx%d+%d+%d' % (WINDOW_SIZE[0], WINDOW_SIZE[1],
(SCREEN_SIZE[0]/2)-(WINDOW_SIZE[0]/2),
(SCREEN_SIZE[1]/2)-(WINDOW_SIZE[1]/2))
def drawtext(canvas, text):
canvas.create_text(5,5,anchor=NW,text=text,fill='dimgrey')
def clear(canvas):
canvas.delete('all')
class VariableForce:
def __init__(self, *args, **kws):
self.value = kws.get('force', [0.0])
class Particle:
def __init__(self, *args, **kws):
self.mass = kws.get('mass', 1.0)
self.pos = kws.get('pos', [0.0,0.0])
self.vel = kws.get('vel', [0.0,0.0])
self.restitution_coeficient = 0.7 # 1.0 = lossless
def next(self):
self.pos[0] = self.pos[0] + self.vel[0]
self.pos[1] = self.pos[1] + self.vel[1]
def applyforce(self, force):
accel = [force[0]/self.mass, force[1]/self.mass]
self.vel[0] += accel[0]
self.vel[1] += accel[1]
def applyimpulse(self, intensity, time=0.1):
# i = ft
forcex = intensity[0] / float(time)
forcey = intensity[1] / float(time)
self.applyforce([forcex, forcey])
def getmomentum(self):
return [self.vel[0]*self.mass, self.vel[1]*self.mass]
x = property(lambda self : self.pos[0], None, None, 'x')
y = property(lambda self : self.pos[1], None, None, 'y')
class Body(Particle):
def __init__(self, *args, **kws):
self.radius = kws.get('radius', 15.0)
self.color = kws.get('color', '#00aacc')
Particle.__init__(self, *args, **kws)
def draw(self, canvas):
if MODE == CIRCLE:
self.__draw_circle(canvas)
elif MODE == RECTANGLE:
self.__draw_rectangle(canvas)
def __draw_circle(self, canvas):
canvas.create_oval(
self.x-self.radius, self.y-self.radius,
self.x+self.radius, self.y+self.radius,
fill=self.color,outline=self.color
)
def __draw_rectangle(self, canvas):
canvas.create_rectangle(
self.x-self.radius, self.y-self.radius,
self.x+self.radius, self.y+self.radius,
fill=self.color,outline=self.color
)
def colliding(self, particle):
return self.distance(particle) <= (self.radius + particle.radius)
def distance(self, particle):
'''
Return the distance between this and another particle
'''
x = abs(self.x-particle.x)
y = abs(self.y-particle.y)
return math.sqrt(x**2 + y**2)
def inverse(self):
self.vel[0] *= -1
self.vel[1] *= -1
def next(self):
Particle.next(self)
n = False
if (self.pos[0]-self.radius) < 0.0:
self.vel[0] = self.vel[0] * -1 * self.restitution_coeficient
n = True
elif (self.pos[0]+self.radius) > WINDOW_SIZE[0]:
self.vel[0] = self.vel[0] * -1 * self.restitution_coeficient
n = True
if (self.pos[1]-self.radius) < 0.0:
self.vel[1] = self.vel[1] * -1 * self.restitution_coeficient
n = True
elif (self.pos[1]+self.radius) > WINDOW_SIZE[1]:
self.vel[1] = self.vel[1] * -1 * self.restitution_coeficient
n = True
if n:
Particle.next(self)
# threshould
if (self.pos[0]-self.radius) < 0.0:
self.pos[0] = self.radius
elif (self.pos[0]+self.radius) > WINDOW_SIZE[0]:
self.pos[0] = WINDOW_SIZE[0] - self.radius
if (self.pos[1]-self.radius) < 0.0:
self.pos[1] = self.radius
elif (self.pos[1]+self.radius) > WINDOW_SIZE[1]:
self.pos[1] = WINDOW_SIZE[1] - self.radius
class Environment:
def __init__(self, *args, **kws):
self.particles = []
self.forces = []
def next(self):
for particle in self.particles:
for force in self.forces:
if type(force) == VariableForce:
particle.applyforce(force.value)
else:
particle.applyforce(force)
particle.next()
def add(self, particle):
self.particles.append(particle)
env = Environment()
def colliding_with(particle):
'''Returns the objects that are colliding with it'''
for _p in env.particles:
if particle != _p:
if particle.colliding(_p):
                # this inversion cannot be done on both axes
particle.inverse() # [fixme]
_p.inverse()
particle.next()
_p.next()
window = Tk()
window.resizable(0,0)
window.bind('<Escape>', lambda e : window.destroy(), '+')
canvas = Canvas(window, bd=0,bg='white',highlightthickness=0,width=WINDOW_SIZE[0],height=WINDOW_SIZE[1])
canvas.grid()
window.geometry(centralize())
env.forces.append([0.0,10.0]) # gravity
env.forces.append([0.0,0.0]) # wind
NUM_PARTICLES = 6
for i in range(NUM_PARTICLES):
kws = {
'pos' : [random.randint(0,WINDOW_SIZE[0]), random.randint(0,WINDOW_SIZE[1])],
'mass' : random.randint(1.0,10.0),
'vel' : [random.randint(-5,5),0],
# 'color' : '#%02x%02x%02x' % (random.randint(0,255),random.randint(0,255),random.randint(0,255))
}
env.add( Body(**kws) )
def _main(*args):
clear(canvas)
for p in env.particles:
colliding_with(p)
p.draw(canvas)
env.next()
txt = 'FORCES:\n'
for force in env.forces:
txt += ' ' + str(force) + '\n'
drawtext(canvas, txt)
'''
if env.particles[0].colliding(env.particles[1]):
print "ok"
env.particles[1].inverse()
env.particles[0].inverse()
env.particles[1].next()
env.particles[0].next()
'''
window.after(TICKER, _main)
_main()
window.mainloop()
| [
"math.sqrt",
"random.randint"
] | [((2406, 2432), 'math.sqrt', 'math.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (2415, 2432), False, 'import math\n'), ((4572, 4597), 'random.randint', 'random.randint', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (4586, 4597), False, 'import random\n'), ((4492, 4525), 'random.randint', 'random.randint', (['(0)', 'WINDOW_SIZE[0]'], {}), '(0, WINDOW_SIZE[0])\n', (4506, 4525), False, 'import random\n'), ((4526, 4559), 'random.randint', 'random.randint', (['(0)', 'WINDOW_SIZE[1]'], {}), '(0, WINDOW_SIZE[1])\n', (4540, 4559), False, 'import random\n'), ((4609, 4630), 'random.randint', 'random.randint', (['(-5)', '(5)'], {}), '(-5, 5)\n', (4623, 4630), False, 'import random\n')] |
'''
Created on Jun 28, 2017
@author: <NAME>
'''
from threading import Thread
class ServerProcess(Thread):
'''
classdocs
'''
def __init__(self):
Thread.__init__(self, name = "ServerProcess")
def init(self, handler):
self.handler = handler
return True
def run(self):
Thread.run(self) | [
"threading.Thread.__init__",
"threading.Thread.run"
] | [((173, 216), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {'name': '"""ServerProcess"""'}), "(self, name='ServerProcess')\n", (188, 216), False, 'from threading import Thread\n'), ((362, 378), 'threading.Thread.run', 'Thread.run', (['self'], {}), '(self)\n', (372, 378), False, 'from threading import Thread\n')] |
from flask import Flask, request, render_template, send_file
from io import BytesIO
import TrigGen
from datetime import datetime
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def home():
return render_template('index.html')
@app.route('/generated-quiz', methods=['GET', 'POST'])
def generated():
### Title is a string used to create worksheet title text.
if "title" in request.form:
title = request.form["title"]
else:
title = "Speed Trig Quiz"
print(title)
### norm is a Boolean for whether the Normal Trig Functions option was selected.
# True if selected
# False if not selected
norm = "norm" in request.form
print(norm)
### reci is a Boolean for whether the Reciprocal Trig Functions option was selected.
# True if selected
# False if not selected
reci = "reci" in request.form
print(reci)
### invnorm is a Boolean for whether the Inverse Normal Trig Functions option was selected.
# True if selected
# False if not selected
invnorm = "invnorm" in request.form
print(invnorm)
### invreci is a Boolean for whether the Inverse Reciprocal Trig Functions option was selected.
# True if selected
# False if not selected
invreci = "invreci" in request.form
print(invreci)
### inc is a Boolean for whether the user wants values above 2π or below 0.
# True if selected
# False if not selected
if "inc" in request.form:
inc = True if request.form["inc"] == "yes" else False
else:
inc = False
print(inc)
timesNewRoman = "timesnewroman" in request.form
### override is a Boolean for whether the user wants exact number or percent chance.
# True if % chance
# False if exact number wanted
if "override" in request.form:
override = True if request.form["override"] == "yes" else False
else:
override = False
print(override)
if "num" in request.form and "chance" in request.form:
num = int(request.form["chance"]) if override else int(request.form["num"])
elif "num" in request.form:
num = int(request.form["num"])
override = False
elif "chance" in request.form:
num = int(request.form["chance"])
override = True
else:
num = 0
print(num)
dl = "dl" in request.form
if app.config['TESTING']:
quiz = TrigGen.test_tex([norm, reci, invnorm, invreci], inc, num, timesNewRoman, override)
if quiz == ('', 204):
return ('', 204)
return send_file(BytesIO(quiz), as_attachment=dl, mimetype="text/x-tex",
attachment_filename="Speed Trig Quiz"+datetime.now().strftime(" %Y-%m-%d at %H.%M.%S.pdf"))
quiz = TrigGen.create_tex(title, [norm, reci, invnorm, invreci], inc, num, timesNewRoman, override)
if quiz == ('', 204):
return ('', 204)
return send_file(BytesIO(bytes(quiz)),
mimetype="application/pdf", as_attachment=dl,
attachment_filename="Speed Trig Quiz"+datetime.now().strftime(" %Y-%m-%d at %H.%M.%S.pdf"))
if __name__ == '__main__':
app.run(host='0.0.0.0', threaded=True, debug=True) | [
"flask.render_template",
"flask.Flask",
"TrigGen.test_tex",
"io.BytesIO",
"datetime.datetime.now",
"TrigGen.create_tex"
] | [((136, 151), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (141, 151), False, 'from flask import Flask, request, render_template, send_file\n'), ((216, 245), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (231, 245), False, 'from flask import Flask, request, render_template, send_file\n'), ((2760, 2856), 'TrigGen.create_tex', 'TrigGen.create_tex', (['title', '[norm, reci, invnorm, invreci]', 'inc', 'num', 'timesNewRoman', 'override'], {}), '(title, [norm, reci, invnorm, invreci], inc, num,\n timesNewRoman, override)\n', (2778, 2856), False, 'import TrigGen\n'), ((2400, 2487), 'TrigGen.test_tex', 'TrigGen.test_tex', (['[norm, reci, invnorm, invreci]', 'inc', 'num', 'timesNewRoman', 'override'], {}), '([norm, reci, invnorm, invreci], inc, num, timesNewRoman,\n override)\n', (2416, 2487), False, 'import TrigGen\n'), ((2568, 2581), 'io.BytesIO', 'BytesIO', (['quiz'], {}), '(quiz)\n', (2575, 2581), False, 'from io import BytesIO\n'), ((3074, 3088), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3086, 3088), False, 'from datetime import datetime\n'), ((2695, 2709), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2707, 2709), False, 'from datetime import datetime\n')] |
# This file was generated by the Tkinter Designer by <NAME>
# https://github.com/ParthJadhav/Tkinter-Designer
from pathlib import Path
# from tkinter import *
# Explicit imports to satisfy Flake8
from tkinter import Label, Tk, Canvas, Entry, Text, Button, PhotoImage
from ip2geotools.databases.noncommercial import DbIpCity
OUTPUT_PATH = Path(__file__).parent
ASSETS_PATH = OUTPUT_PATH / Path("./assets")
def relative_to_assets(path: str) -> Path:
return ASSETS_PATH / Path(path)
window = Tk()
window.geometry("300x550")
window.configure(bg = "#5DC1C7")
window.title("IP2GEO")
canvas = Canvas(
window,
bg = "#5DC1C7",
height = 550,
width = 300,
bd = 0,
highlightthickness = 0,
relief = "ridge"
)
canvas.place(x = 0, y = 0)
canvas.create_text(
75.0,
26.0,
anchor="nw",
text="IP 2 GEO",
fill="#FFFFFF",
font=("Roboto", 36 * -1)
)
entry_image_1 = PhotoImage(
file=relative_to_assets("entry_1.png"))
entry_bg_1 = canvas.create_image(
150.5,
173.0,
image=entry_image_1
)
entry_1 = Entry(
bd=0,
bg="#E5E5E5",
highlightthickness=0,
fg="black"
)
entry_1.place(
x=52.0,
y=145.0,
width=197.0,
height=54.0
)
canvas.create_text(
36.0,
125.0,
anchor="nw",
text="ENTER IP ADDRESS",
fill="#FCFAFA",
font=("Roboto", 12 * -1)
)
def ip_check():
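    # Query the free DbIpCity service for the entered IP and render
    # country/city/latitude/longitude labels on the window.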
response = DbIpCity.get(entry_1.get(),api_key='free')
'''
print("IP country:",response.country)
print("IP city:",response.city)
print("IP latitude:",response.latitude)
print("IP longitude:",response.longitude)
'''
co = "IP country: "+response.country
ci = "IP city: " + response.city
li = "IP latitude: "+ str(response.latitude)
lo = "IP longitude: " + str(response.longitude)
country = Label(text=co,bg="#5DC1C7",fg="white")
country.place(x=30,y=350)
city = Label(text=ci,bg="#5DC1C7",fg="white")
city.place(x=30,y=400)
lit = Label(text=li,bg="#5DC1C7",fg="white")
lit.place(x=30,y=450)
lon = Label(text=lo,bg="#5DC1C7",fg="white")
lon.place(x=30,y=500)
button_image_1 = PhotoImage(
file=relative_to_assets("button_1.png"))
button_1 = Button(
image=button_image_1,
borderwidth=0,
highlightthickness=0,
command=lambda: ip_check(),
relief="flat",
)
button_1.place(
x=95.0,
y=250.0,
width=110.0,
height=45.0
)
window.resizable(False, False)
window.mainloop()
| [
"tkinter.Entry",
"pathlib.Path",
"tkinter.Canvas",
"tkinter.Tk",
"tkinter.Label"
] | [((502, 506), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (504, 506), False, 'from tkinter import Label, Tk, Canvas, Entry, Text, Button, PhotoImage\n'), ((601, 700), 'tkinter.Canvas', 'Canvas', (['window'], {'bg': '"""#5DC1C7"""', 'height': '(550)', 'width': '(300)', 'bd': '(0)', 'highlightthickness': '(0)', 'relief': '"""ridge"""'}), "(window, bg='#5DC1C7', height=550, width=300, bd=0,\n highlightthickness=0, relief='ridge')\n", (607, 700), False, 'from tkinter import Label, Tk, Canvas, Entry, Text, Button, PhotoImage\n'), ((1061, 1120), 'tkinter.Entry', 'Entry', ([], {'bd': '(0)', 'bg': '"""#E5E5E5"""', 'highlightthickness': '(0)', 'fg': '"""black"""'}), "(bd=0, bg='#E5E5E5', highlightthickness=0, fg='black')\n", (1066, 1120), False, 'from tkinter import Label, Tk, Canvas, Entry, Text, Button, PhotoImage\n'), ((343, 357), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (347, 357), False, 'from pathlib import Path\n'), ((393, 409), 'pathlib.Path', 'Path', (['"""./assets"""'], {}), "('./assets')\n", (397, 409), False, 'from pathlib import Path\n'), ((1808, 1848), 'tkinter.Label', 'Label', ([], {'text': 'co', 'bg': '"""#5DC1C7"""', 'fg': '"""white"""'}), "(text=co, bg='#5DC1C7', fg='white')\n", (1813, 1848), False, 'from tkinter import Label, Tk, Canvas, Entry, Text, Button, PhotoImage\n'), ((1888, 1928), 'tkinter.Label', 'Label', ([], {'text': 'ci', 'bg': '"""#5DC1C7"""', 'fg': '"""white"""'}), "(text=ci, bg='#5DC1C7', fg='white')\n", (1893, 1928), False, 'from tkinter import Label, Tk, Canvas, Entry, Text, Button, PhotoImage\n'), ((1964, 2004), 'tkinter.Label', 'Label', ([], {'text': 'li', 'bg': '"""#5DC1C7"""', 'fg': '"""white"""'}), "(text=li, bg='#5DC1C7', fg='white')\n", (1969, 2004), False, 'from tkinter import Label, Tk, Canvas, Entry, Text, Button, PhotoImage\n'), ((2039, 2079), 'tkinter.Label', 'Label', ([], {'text': 'lo', 'bg': '"""#5DC1C7"""', 'fg': '"""white"""'}), "(text=lo, bg='#5DC1C7', fg='white')\n", (2044, 2079), False, 'from tkinter import Label, Tk, Canvas, Entry, Text, Button, PhotoImage\n'), ((480, 490), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (484, 490), False, 'from pathlib import Path\n')] |
"""
Tests around our XML modulestore, including importing
well-formed and not-well-formed XML.
"""
import os.path
from glob import glob
from unittest.mock import Mock, patch
import pytest
from django.test import TestCase
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.test_modulestore import check_has_course_method
from xmodule.modulestore.tests.utils import TILDA_FILES_DICT, add_temp_files_from_dict, remove_temp_files_from_list
from xmodule.modulestore.xml import XMLModuleStore
from xmodule.tests import DATA_DIR
from xmodule.x_module import XModuleMixin
def glob_tildes_at_end(path):
"""
A wrapper for the `glob.glob` function, but it always returns
files that end in a tilde (~) at the end of the list of results.
"""
result = glob(path)
with_tildes = [f for f in result if f.endswith("~")]
no_tildes = [f for f in result if not f.endswith("~")]
return no_tildes + with_tildes
class TestXMLModuleStore(TestCase):
"""
Test around the XML modulestore
"""
@patch('xmodule.tabs.CourseTabList.initialize_default', Mock())
def test_unicode_chars_in_xml_content(self):
# edX/full/6.002_Spring_2012 has non-ASCII chars, and during
# uniquification of names, would raise a UnicodeError. It no longer does.
# Ensure that there really is a non-ASCII character in the course.
with open(os.path.join(DATA_DIR, "toy/sequential/vertical_sequential.xml"), 'rb') as xmlf:
xml = xmlf.read()
with pytest.raises(UnicodeDecodeError):
xml.decode('ascii')
# Load the course, but don't make error modules. This will succeed,
# but will record the errors.
modulestore = XMLModuleStore(
DATA_DIR,
source_dirs=['toy'],
xblock_mixins=(XModuleMixin,),
load_error_modules=False)
# Look up the errors during load. There should be none.
errors = modulestore.get_course_errors(CourseKey.from_string("edX/toy/2012_Fall"))
assert errors == []
def test_get_courses_for_wiki(self):
"""
Test the get_courses_for_wiki method
"""
store = XMLModuleStore(DATA_DIR, source_dirs=['toy', 'simple'])
for course in store.get_courses():
course_locations = store.get_courses_for_wiki(course.wiki_slug)
assert len(course_locations) == 1
assert course.location.course_key in course_locations
course_locations = store.get_courses_for_wiki('no_such_wiki')
assert len(course_locations) == 0
# now set toy course to share the wiki with simple course
toy_course = store.get_course(CourseKey.from_string('edX/toy/2012_Fall'))
toy_course.wiki_slug = 'simple'
course_locations = store.get_courses_for_wiki('toy')
assert len(course_locations) == 0
course_locations = store.get_courses_for_wiki('simple')
assert len(course_locations) == 2
for course_number in ['toy', 'simple']:
assert CourseKey.from_string('/'.join(['edX', course_number, '2012_Fall'])) in course_locations
def test_has_course(self):
"""
Test the has_course method
"""
check_has_course_method(
XMLModuleStore(DATA_DIR, source_dirs=['toy', 'simple']),
CourseKey.from_string('edX/toy/2012_Fall'),
locator_key_fields=CourseLocator.KEY_FIELDS
)
def test_branch_setting(self):
"""
Test the branch setting context manager
"""
store = XMLModuleStore(DATA_DIR, source_dirs=['toy'])
course = store.get_courses()[0]
# XML store allows published_only branch setting
with store.branch_setting(ModuleStoreEnum.Branch.published_only, course.id):
store.get_item(course.location)
# XML store does NOT allow draft_preferred branch setting
with pytest.raises(ValueError):
with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
# verify that the above context manager raises a ValueError
pass # pragma: no cover
@patch('xmodule.modulestore.xml.log')
def test_dag_course(self, mock_logging):
"""
Test a course whose structure is not a tree.
"""
store = XMLModuleStore(
DATA_DIR,
source_dirs=['xml_dag'],
xblock_mixins=(XModuleMixin,),
)
course_key = store.get_courses()[0].id
mock_logging.warning.assert_called_with(
"%s has more than one definition", course_key.make_usage_key('discussion', 'duplicate_def')
)
shared_item_loc = course_key.make_usage_key('html', 'toyhtml')
shared_item = store.get_item(shared_item_loc)
parent = shared_item.get_parent()
assert parent is not None, 'get_parent failed to return a value'
parent_loc = course_key.make_usage_key('vertical', 'vertical_test')
assert parent.location == parent_loc
assert shared_item.location in [x.location for x in parent.get_children()]
# ensure it's still a child of the other parent even tho it doesn't claim the other parent as its parent
other_parent_loc = course_key.make_usage_key('vertical', 'zeta')
other_parent = store.get_item(other_parent_loc)
# children rather than get_children b/c the instance returned by get_children != shared_item
assert shared_item_loc in other_parent.children
class TestModuleStoreIgnore(TestXMLModuleStore): # lint-amnesty, pylint: disable=missing-class-docstring, test-inherits-tests
course_dir = DATA_DIR / "course_ignore"
def setUp(self):
super().setUp()
self.addCleanup(remove_temp_files_from_list, list(TILDA_FILES_DICT.keys()), self.course_dir / "static")
add_temp_files_from_dict(TILDA_FILES_DICT, self.course_dir / "static")
@patch("xmodule.modulestore.xml.glob.glob", side_effect=glob_tildes_at_end)
def test_tilde_files_ignored(self, _fake_glob):
modulestore = XMLModuleStore(DATA_DIR, source_dirs=['course_ignore'], load_error_modules=False)
about_location = CourseKey.from_string('edX/course_ignore/2014_Fall').make_usage_key(
'about', 'index',
)
about_module = modulestore.get_item(about_location)
assert 'GREEN' in about_module.data
assert 'RED' not in about_module.data
| [
"unittest.mock.Mock",
"xmodule.modulestore.tests.utils.add_temp_files_from_dict",
"xmodule.modulestore.tests.utils.TILDA_FILES_DICT.keys",
"xmodule.modulestore.xml.XMLModuleStore",
"pytest.raises",
"opaque_keys.edx.keys.CourseKey.from_string",
"unittest.mock.patch",
"glob.glob"
] | [((885, 895), 'glob.glob', 'glob', (['path'], {}), '(path)\n', (889, 895), False, 'from glob import glob\n'), ((4288, 4324), 'unittest.mock.patch', 'patch', (['"""xmodule.modulestore.xml.log"""'], {}), "('xmodule.modulestore.xml.log')\n", (4293, 4324), False, 'from unittest.mock import Mock, patch\n'), ((6062, 6136), 'unittest.mock.patch', 'patch', (['"""xmodule.modulestore.xml.glob.glob"""'], {'side_effect': 'glob_tildes_at_end'}), "('xmodule.modulestore.xml.glob.glob', side_effect=glob_tildes_at_end)\n", (6067, 6136), False, 'from unittest.mock import Mock, patch\n'), ((1837, 1943), 'xmodule.modulestore.xml.XMLModuleStore', 'XMLModuleStore', (['DATA_DIR'], {'source_dirs': "['toy']", 'xblock_mixins': '(XModuleMixin,)', 'load_error_modules': '(False)'}), "(DATA_DIR, source_dirs=['toy'], xblock_mixins=(XModuleMixin,),\n load_error_modules=False)\n", (1851, 1943), False, 'from xmodule.modulestore.xml import XMLModuleStore\n'), ((1198, 1204), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1202, 1204), False, 'from unittest.mock import Mock, patch\n'), ((2300, 2355), 'xmodule.modulestore.xml.XMLModuleStore', 'XMLModuleStore', (['DATA_DIR'], {'source_dirs': "['toy', 'simple']"}), "(DATA_DIR, source_dirs=['toy', 'simple'])\n", (2314, 2355), False, 'from xmodule.modulestore.xml import XMLModuleStore\n'), ((3695, 3740), 'xmodule.modulestore.xml.XMLModuleStore', 'XMLModuleStore', (['DATA_DIR'], {'source_dirs': "['toy']"}), "(DATA_DIR, source_dirs=['toy'])\n", (3709, 3740), False, 'from xmodule.modulestore.xml import XMLModuleStore\n'), ((4463, 4548), 'xmodule.modulestore.xml.XMLModuleStore', 'XMLModuleStore', (['DATA_DIR'], {'source_dirs': "['xml_dag']", 'xblock_mixins': '(XModuleMixin,)'}), "(DATA_DIR, source_dirs=['xml_dag'], xblock_mixins=(XModuleMixin,)\n )\n", (4477, 4548), False, 'from xmodule.modulestore.xml import XMLModuleStore\n'), ((5985, 6055), 'xmodule.modulestore.tests.utils.add_temp_files_from_dict', 'add_temp_files_from_dict', (['TILDA_FILES_DICT', "(self.course_dir / 'static')"], {}), "(TILDA_FILES_DICT, self.course_dir / 'static')\n", (6009, 6055), False, 'from xmodule.modulestore.tests.utils import TILDA_FILES_DICT, add_temp_files_from_dict, remove_temp_files_from_list\n'), ((6211, 6297), 'xmodule.modulestore.xml.XMLModuleStore', 'XMLModuleStore', (['DATA_DIR'], {'source_dirs': "['course_ignore']", 'load_error_modules': '(False)'}), "(DATA_DIR, source_dirs=['course_ignore'], load_error_modules=\n False)\n", (6225, 6297), False, 'from xmodule.modulestore.xml import XMLModuleStore\n'), ((2101, 2143), 'opaque_keys.edx.keys.CourseKey.from_string', 'CourseKey.from_string', (['"""edX/toy/2012_Fall"""'], {}), "('edX/toy/2012_Fall')\n", (2122, 2143), False, 'from opaque_keys.edx.keys import CourseKey\n'), ((2805, 2847), 'opaque_keys.edx.keys.CourseKey.from_string', 'CourseKey.from_string', (['"""edX/toy/2012_Fall"""'], {}), "('edX/toy/2012_Fall')\n", (2826, 2847), False, 'from opaque_keys.edx.keys import CourseKey\n'), ((3392, 3447), 'xmodule.modulestore.xml.XMLModuleStore', 'XMLModuleStore', (['DATA_DIR'], {'source_dirs': "['toy', 'simple']"}), "(DATA_DIR, source_dirs=['toy', 'simple'])\n", (3406, 3447), False, 'from xmodule.modulestore.xml import XMLModuleStore\n'), ((3461, 3503), 'opaque_keys.edx.keys.CourseKey.from_string', 'CourseKey.from_string', (['"""edX/toy/2012_Fall"""'], {}), "('edX/toy/2012_Fall')\n", (3482, 3503), False, 'from opaque_keys.edx.keys import CourseKey\n'), ((4048, 4073), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4061, 
4073), False, 'import pytest\n'), ((1628, 1661), 'pytest.raises', 'pytest.raises', (['UnicodeDecodeError'], {}), '(UnicodeDecodeError)\n', (1641, 1661), False, 'import pytest\n'), ((5923, 5946), 'xmodule.modulestore.tests.utils.TILDA_FILES_DICT.keys', 'TILDA_FILES_DICT.keys', ([], {}), '()\n', (5944, 5946), False, 'from xmodule.modulestore.tests.utils import TILDA_FILES_DICT, add_temp_files_from_dict, remove_temp_files_from_list\n'), ((6318, 6370), 'opaque_keys.edx.keys.CourseKey.from_string', 'CourseKey.from_string', (['"""edX/course_ignore/2014_Fall"""'], {}), "('edX/course_ignore/2014_Fall')\n", (6339, 6370), False, 'from opaque_keys.edx.keys import CourseKey\n')] |
"""
Convenience classes for defining some sliders
"""
from math import radians
import tkinter as tk
class OffsetSlider(object):
""" Sliders for changing the initial pose offsets
"""
def __init__(self, key, bounds, gui):
""" @param key: 'x', 'y', or 'yaw'
@param bounds: [min, max] -> bounds on the offset
@param gui: reference to the TrackBuilderGUI instance
"""
self.gui = gui
self.key = key
self.offset_var = tk.DoubleVar()
self.offset_var.set(gui.initial_pose_offset[key])
self.offset_var.trace(
"w", lambda name, index, mode,
sv=self.offset_var:
self._offset_cb(self.offset_var)
)
resolution = 1 if key == 'yaw' else 0.1
offset_scale = tk.Scale(
gui.top_frame3, from_=bounds[0], to=bounds[1], resolution=resolution,
orient=tk.HORIZONTAL, variable=self.offset_var)
offset_scale.pack(side=tk.LEFT)
def _offset_cb(self, var):
value = self.offset_var.get()
if self.key == 'yaw':
value = radians(value)
self.gui.initial_pose_offset[self.key] = value
self.gui.update_window()
class BasicSlider(object):
""" Basic slider storing one double value
"""
def __init__(self, default_value, bounds, resolution, gui, frame):
""" @param default_value: default value of the slider
@param bounds: [min, max] -> bounds on the offset
@param resolution: resolution of the slider
@param gui: reference to the TrackBuilderGUI instance
@param frame: tk frame in which add the slider
"""
self.gui = gui
self.var = tk.DoubleVar()
self.var.set(default_value)
self.var.trace(
"w", lambda name, index, mode,
sv=self.var: self._callback(self.var)
)
scale = tk.Scale(
frame, from_=bounds[0], to=bounds[1], resolution=resolution,
orient=tk.HORIZONTAL, variable=self.var)
scale.pack(side=tk.LEFT)
def _callback(self, var):
self.gui.update_window()
def get_value(self):
return self.var.get()
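# Usage sketch (assumes a gui object exposing update_window() and an existing tk frame;
# the concrete names are illustrative):
#     slider = BasicSlider(1.0, [0.0, 5.0], 0.1, gui, frame)
#     value = slider.get_value()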
| [
"tkinter.Scale",
"math.radians",
"tkinter.DoubleVar"
] | [((495, 509), 'tkinter.DoubleVar', 'tk.DoubleVar', ([], {}), '()\n', (507, 509), True, 'import tkinter as tk\n'), ((804, 935), 'tkinter.Scale', 'tk.Scale', (['gui.top_frame3'], {'from_': 'bounds[0]', 'to': 'bounds[1]', 'resolution': 'resolution', 'orient': 'tk.HORIZONTAL', 'variable': 'self.offset_var'}), '(gui.top_frame3, from_=bounds[0], to=bounds[1], resolution=\n resolution, orient=tk.HORIZONTAL, variable=self.offset_var)\n', (812, 935), True, 'import tkinter as tk\n'), ((1733, 1747), 'tkinter.DoubleVar', 'tk.DoubleVar', ([], {}), '()\n', (1745, 1747), True, 'import tkinter as tk\n'), ((1928, 2042), 'tkinter.Scale', 'tk.Scale', (['frame'], {'from_': 'bounds[0]', 'to': 'bounds[1]', 'resolution': 'resolution', 'orient': 'tk.HORIZONTAL', 'variable': 'self.var'}), '(frame, from_=bounds[0], to=bounds[1], resolution=resolution,\n orient=tk.HORIZONTAL, variable=self.var)\n', (1936, 2042), True, 'import tkinter as tk\n'), ((1116, 1130), 'math.radians', 'radians', (['value'], {}), '(value)\n', (1123, 1130), False, 'from math import radians\n')] |
import tensorflow as tf
import numpy as np
from visualize import Visualize
import scipy.misc
image = "image.jpg"
def to_label(label):
text_label = ""
for single_label in label:
number = np.argmax(single_label)
if number == 10:
return text_label
else:
text_label += str(number)
return text_label
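# e.g. one-hot rows encoding [1, 7, 10, ...] yield "17" - class 10 marks the end of the digit string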
with tf.Session() as sess:
saver = tf.train.import_meta_graph('SVHN_recognition/checkpoints/SVHN/SVHN-30000.meta')
saver.restore(sess, 'SVHN_recognition/checkpoints/SVHN/SVHN-30000')
graph = tf.get_default_graph()
inputs = graph.get_tensor_by_name("inputs:0")
label = graph.get_tensor_by_name("inference/stack:0")
position = graph.get_tensor_by_name("inference/fc_5/MatMul:0")
input = scipy.misc.imresize(scipy.misc.imread(image), (128, 256))
feed_dict = {inputs: [input]}
label, position = sess.run([label, position], feed_dict)
visualize = Visualize()
visualize.visualize_inference(input, to_label(label[0]), position[0])
| [
"tensorflow.Session",
"numpy.argmax",
"visualize.Visualize",
"tensorflow.train.import_meta_graph",
"tensorflow.get_default_graph"
] | [((365, 377), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (375, 377), True, 'import tensorflow as tf\n'), ((399, 478), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['"""SVHN_recognition/checkpoints/SVHN/SVHN-30000.meta"""'], {}), "('SVHN_recognition/checkpoints/SVHN/SVHN-30000.meta')\n", (425, 478), True, 'import tensorflow as tf\n'), ((563, 585), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (583, 585), True, 'import tensorflow as tf\n'), ((944, 955), 'visualize.Visualize', 'Visualize', ([], {}), '()\n', (953, 955), False, 'from visualize import Visualize\n'), ((205, 228), 'numpy.argmax', 'np.argmax', (['single_label'], {}), '(single_label)\n', (214, 228), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 20 14:28:04 2018
@author: <NAME>
"""
from QChemTool.Development.defect import Defect,initialize_defect_database,defects_database
from QChemTool import position_units
from QChemTool import Structure
from QChemTool.QuantumChem.read_mine import read_TrEsp_charges
from QChemTool.QuantumChem.Fluorographene.fluorographene import constrainsFG
import numpy as np
coor,tr_charge,at_type = read_TrEsp_charges("Perylene_transition_TDDFT_fitted_charges_NoH.out")
coor,gr_charge,at_type = read_TrEsp_charges("Perylene_ground_TDDFT_fitted_charges_NoH.out")
coor,ex_charge,at_type = read_TrEsp_charges("Perylene_excited_TDDFT_fitted_charges_NoH.out")
with position_units("Angstrom"):
struc = {"coor": coor, "at_type": at_type}
charges = {"ground": gr_charge, "excited": ex_charge,"transition": tr_charge}
def1 = Defect(struc=struc, charges=charges)
system = Structure()
system.load_xyz("FGrph_1perylene_2dist_ser_TDDFT-wB97XD_geom_BLYP-landl2dz_symm.xyz")
# Get set of defects from structure
indx_FG = constrainsFG(system,border=False,defect=True)
indx_FG = np.array(indx_FG,dtype="i8")
coor = system.coor.value[indx_FG]
at_type = []
for ii in indx_FG:
at_type.append( system.at_type[ii] )
FGdefects = Structure()
FGdefects.add_coor(coor,at_type)
indx_def = FGdefects.count_fragments()
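# count_fragments() groups the defect atoms into fragments (one list of atom indices per defect)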
Ndef = len(indx_def)
defects = []
for ii in range(Ndef):
at_type = []
for jj in indx_def[ii]:
at_type.append( FGdefects.at_type[jj] )
struc = {"coor": FGdefects.coor.value[indx_def[ii]], "at_type": at_type}
index = list(indx_FG[ indx_def[ii] ])
defct = Defect(struc=struc, index=index)
defects.append(defct)
print(defects[0].index)
# identify defect
def2 = defects[0]
#index_corr,RSMD = def2.identify_defect(def1)
#def1.output_to_pdb("def1.pdb")
#def2.coor.value = def2.coor.value[index_corr]
#def2.output_to_pdb("def2_reord.pdb")
def2.load_charges_from_defect(def1)
coor,tr_charge,at_type = read_TrEsp_charges("Perylene_transition_TDDFT_fitted_charges_NoH.out")
coor,gr_charge,at_type = read_TrEsp_charges("Perylene_ground_TDDFT_fitted_charges_NoH.out")
coor,ex_charge,at_type = read_TrEsp_charges("Perylene_excited_TDDFT_fitted_charges_NoH.out")
with position_units("Angstrom"):
struc = {"coor": coor, "at_type": at_type}
charges = {"ground": gr_charge, "excited": ex_charge,"transition": tr_charge}
def1 = Defect(struc=struc, charges=charges)
def1.coor.value
defects_database = initialize_defect_database("QC")
print(defects_database.keys())
# FG system automaticaly identify defects - parameters: transition energy type
# Then manually call function assign defects to allow some changes in the database
| [
"QChemTool.QuantumChem.read_mine.read_TrEsp_charges",
"QChemTool.position_units",
"QChemTool.Development.defect.Defect",
"numpy.array",
"QChemTool.QuantumChem.Fluorographene.fluorographene.constrainsFG",
"QChemTool.Development.defect.defects_database.keys",
"QChemTool.Structure",
"QChemTool.Development.defect.initialize_defect_database"
] | [((448, 518), 'QChemTool.QuantumChem.read_mine.read_TrEsp_charges', 'read_TrEsp_charges', (['"""Perylene_transition_TDDFT_fitted_charges_NoH.out"""'], {}), "('Perylene_transition_TDDFT_fitted_charges_NoH.out')\n", (466, 518), False, 'from QChemTool.QuantumChem.read_mine import read_TrEsp_charges\n'), ((545, 611), 'QChemTool.QuantumChem.read_mine.read_TrEsp_charges', 'read_TrEsp_charges', (['"""Perylene_ground_TDDFT_fitted_charges_NoH.out"""'], {}), "('Perylene_ground_TDDFT_fitted_charges_NoH.out')\n", (563, 611), False, 'from QChemTool.QuantumChem.read_mine import read_TrEsp_charges\n'), ((638, 705), 'QChemTool.QuantumChem.read_mine.read_TrEsp_charges', 'read_TrEsp_charges', (['"""Perylene_excited_TDDFT_fitted_charges_NoH.out"""'], {}), "('Perylene_excited_TDDFT_fitted_charges_NoH.out')\n", (656, 705), False, 'from QChemTool.QuantumChem.read_mine import read_TrEsp_charges\n'), ((937, 948), 'QChemTool.Structure', 'Structure', ([], {}), '()\n', (946, 948), False, 'from QChemTool import Structure\n'), ((1086, 1133), 'QChemTool.QuantumChem.Fluorographene.fluorographene.constrainsFG', 'constrainsFG', (['system'], {'border': '(False)', 'defect': '(True)'}), '(system, border=False, defect=True)\n', (1098, 1133), False, 'from QChemTool.QuantumChem.Fluorographene.fluorographene import constrainsFG\n'), ((1143, 1172), 'numpy.array', 'np.array', (['indx_FG'], {'dtype': '"""i8"""'}), "(indx_FG, dtype='i8')\n", (1151, 1172), True, 'import numpy as np\n'), ((1298, 1309), 'QChemTool.Structure', 'Structure', ([], {}), '()\n', (1307, 1309), False, 'from QChemTool import Structure\n'), ((2037, 2107), 'QChemTool.QuantumChem.read_mine.read_TrEsp_charges', 'read_TrEsp_charges', (['"""Perylene_transition_TDDFT_fitted_charges_NoH.out"""'], {}), "('Perylene_transition_TDDFT_fitted_charges_NoH.out')\n", (2055, 2107), False, 'from QChemTool.QuantumChem.read_mine import read_TrEsp_charges\n'), ((2134, 2200), 'QChemTool.QuantumChem.read_mine.read_TrEsp_charges', 'read_TrEsp_charges', (['"""Perylene_ground_TDDFT_fitted_charges_NoH.out"""'], {}), "('Perylene_ground_TDDFT_fitted_charges_NoH.out')\n", (2152, 2200), False, 'from QChemTool.QuantumChem.read_mine import read_TrEsp_charges\n'), ((2227, 2294), 'QChemTool.QuantumChem.read_mine.read_TrEsp_charges', 'read_TrEsp_charges', (['"""Perylene_excited_TDDFT_fitted_charges_NoH.out"""'], {}), "('Perylene_excited_TDDFT_fitted_charges_NoH.out')\n", (2245, 2294), False, 'from QChemTool.QuantumChem.read_mine import read_TrEsp_charges\n'), ((2557, 2589), 'QChemTool.Development.defect.initialize_defect_database', 'initialize_defect_database', (['"""QC"""'], {}), "('QC')\n", (2583, 2589), False, 'from QChemTool.Development.defect import Defect, initialize_defect_database, defects_database\n'), ((714, 740), 'QChemTool.position_units', 'position_units', (['"""Angstrom"""'], {}), "('Angstrom')\n", (728, 740), False, 'from QChemTool import position_units\n'), ((886, 922), 'QChemTool.Development.defect.Defect', 'Defect', ([], {'struc': 'struc', 'charges': 'charges'}), '(struc=struc, charges=charges)\n', (892, 922), False, 'from QChemTool.Development.defect import Defect, initialize_defect_database, defects_database\n'), ((1674, 1706), 'QChemTool.Development.defect.Defect', 'Defect', ([], {'struc': 'struc', 'index': 'index'}), '(struc=struc, index=index)\n', (1680, 1706), False, 'from QChemTool.Development.defect import Defect, initialize_defect_database, defects_database\n'), ((2301, 2327), 'QChemTool.position_units', 'position_units', (['"""Angstrom"""'], {}), "('Angstrom')\n", 
(2315, 2327), False, 'from QChemTool import position_units\n'), ((2473, 2509), 'QChemTool.Development.defect.Defect', 'Defect', ([], {'struc': 'struc', 'charges': 'charges'}), '(struc=struc, charges=charges)\n', (2479, 2509), False, 'from QChemTool.Development.defect import Defect, initialize_defect_database, defects_database\n'), ((2597, 2620), 'QChemTool.Development.defect.defects_database.keys', 'defects_database.keys', ([], {}), '()\n', (2618, 2620), False, 'from QChemTool.Development.defect import Defect, initialize_defect_database, defects_database\n')] |
#!/usr/bin/python3
import os
import pygit2
import subprocess
import sys
import time
import robodoge
# Script to mass-evaluate the remaining pull requests and raise them against Dogecoin
# where feasible.
def build_pr_body(pr_titles, pr_ids):
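    """Build the body text for a batch pull request from the titles of its component PRs."""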
contents = []
for pr_id in pr_ids:
contents.append(pr_titles[pr_id])
return "Contains:\n\n* " + "\n* ".join(contents)
def mark_commits_merged(conn, merger, new_pr, pr_ids):
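    """Record the newly raised batch PR and mark its component pull request commits as merged."""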
cursor = conn.cursor()
try:
        # Record the new PR in the database
robodoge.write_pr(cursor, new_pr, 'dogecoin/dogecoin')
# Mark component PRs done
for pr_id in pr_ids:
cursor.execute("UPDATE pull_request_commit SET merged='t', raised_pr_id=%(raised_pr)s WHERE pr_id=%(pr_id)s", {
'pr_id': pr_id,
'raised_pr': new_pr['id']
})
conn.commit()
finally:
cursor.close()
def raise_pull_request(conn, merger, pr_titles, pr_ids):
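    """Create a batch branch, apply the given pull requests to it, push it to origin
    and raise a combined pull request against dogecoin/dogecoin."""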
repo = merger.repo
title = '[Auto] Bitcoin PR batch %s' % time.asctime()
body = build_pr_body(pr_titles, pr_ids)
# Create new branch
branch_name = 'bitcoin-batch-%d' % int(time.time())
batch_branch = merger.create_branch(branch_name)
repo.checkout(batch_branch)
merger.apply_pull_requests(conn, batch_branch, pr_ids)
# Push branch upstream and raise PR
branch_ref = repo.lookup_reference('refs/heads/' + batch_branch.branch_name)
print('Pushing branch %s to origin' % batch_branch.branch_name)
remote = repo.remotes["origin"]
remote.credentials = pygit2.UserPass(merger.private_token, 'x-oauth-basic')
remote.push([branch_ref.name])
# Raise a PR from the new branch
new_pr = merger.raise_pr('dogecoin/dogecoin', title, body, batch_branch.branch_name)
mark_commits_merged(conn, merger, new_pr, pr_ids)
def test_pr_merge(conn, merger, pr_id):
"""
    Test whether a pull request can be cleanly merged against the current development
    branch and still compiles. Returns True on success, False otherwise.
"""
path = merger.config['dogecoin_repo']['path']
repo = merger.repo
# Test if the branch exists already, create it if not
head_branch = merger.create_branch('bitcoin-pr-%d' % pr_id)
if not head_branch:
return False
try:
repo.checkout(head_branch)
if not merger.apply_pull_requests(conn, head_branch, [pr_id]):
return False
# Make sure it's a viable build too
print('Attempting compilation of PR %d' % pr_id)
try:
robodoge.compile_dogecoin(path)
except robodoge.BuildError:
return False
finally:
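        # Always switch back to the safe branch and delete the temporary PR branch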
repo.checkout(merger.safe_branch)
repo.lookup_branch(head_branch.branch_name, pygit2.GIT_BRANCH_LOCAL).delete()
return True
config = robodoge.load_configuration('config.yml')
try:
merger = robodoge.Robodoge(config)
except robodoge.ConfigurationError as err:
print(err.msg)
sys.exit(1)
conn = merger.get_connection()
try:
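    # pr_titles maps PR id to title; ordered_pr_ids preserves the upstream merge order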
pr_titles = {}
ordered_pr_ids = []
cursor = conn.cursor()
try:
# Find pull requests to evaluate
cursor.execute(
"""SELECT pr.id, pr.title
FROM pull_request pr
JOIN pull_request_commit commit ON commit.pr_id=pr.id
WHERE commit.to_merge='t' AND commit.merged='f'
ORDER BY pr.merged_at, pr.id ASC""")
for record in cursor:
pr_id = record[0]
if pr_id not in ordered_pr_ids:
ordered_pr_ids.append(pr_id)
pr_titles[pr_id] = record[1]
finally:
cursor.close()
viable_pr_ids = []
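    # Test each candidate PR in turn; raise a batch PR for every four that merge and build cleanly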
for pr_id in ordered_pr_ids:
if test_pr_merge(conn, merger, pr_id):
viable_pr_ids.append(pr_id)
if len(viable_pr_ids) == 4:
try:
raise_pull_request(conn, merger, pr_titles, viable_pr_ids)
except robodoge.BranchCollisionError as err:
print(err.msg)
viable_pr_ids = []
time.sleep(60*60) # Give the server a break
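    # Raise any leftover PRs that did not fill a complete batch of four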
if len(viable_pr_ids) > 0:
try:
raise_pull_request(conn, merger, pr_titles, viable_pr_ids)
except robodoge.BranchCollisionError as err:
print(err.msg)
finally:
conn.close()
merger.repo.checkout(merger.safe_branch)
| [
"time.asctime",
"robodoge.compile_dogecoin",
"pygit2.UserPass",
"robodoge.Robodoge",
"time.sleep",
"robodoge.load_configuration",
"robodoge.write_pr",
"sys.exit",
"time.time"
] | [((2779, 2820), 'robodoge.load_configuration', 'robodoge.load_configuration', (['"""config.yml"""'], {}), "('config.yml')\n", (2806, 2820), False, 'import robodoge\n'), ((1572, 1626), 'pygit2.UserPass', 'pygit2.UserPass', (['merger.private_token', '"""x-oauth-basic"""'], {}), "(merger.private_token, 'x-oauth-basic')\n", (1587, 1626), False, 'import pygit2\n'), ((2839, 2864), 'robodoge.Robodoge', 'robodoge.Robodoge', (['config'], {}), '(config)\n', (2856, 2864), False, 'import robodoge\n'), ((522, 576), 'robodoge.write_pr', 'robodoge.write_pr', (['cursor', 'new_pr', '"""dogecoin/dogecoin"""'], {}), "(cursor, new_pr, 'dogecoin/dogecoin')\n", (539, 576), False, 'import robodoge\n'), ((1036, 1050), 'time.asctime', 'time.asctime', ([], {}), '()\n', (1048, 1050), False, 'import time\n'), ((2931, 2942), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2939, 2942), False, 'import sys\n'), ((1163, 1174), 'time.time', 'time.time', ([], {}), '()\n', (1172, 1174), False, 'import time\n'), ((2518, 2549), 'robodoge.compile_dogecoin', 'robodoge.compile_dogecoin', (['path'], {}), '(path)\n', (2543, 2549), False, 'import robodoge\n'), ((4023, 4042), 'time.sleep', 'time.sleep', (['(60 * 60)'], {}), '(60 * 60)\n', (4033, 4042), False, 'import time\n')] |
# -*- coding: utf-8 -*-
from distutils.core import setup
setup(name="SetupPyUTF8",
author="<NAME>",
)
| [
"distutils.core.setup"
] | [((59, 101), 'distutils.core.setup', 'setup', ([], {'name': '"""SetupPyUTF8"""', 'author': '"""<NAME>"""'}), "(name='SetupPyUTF8', author='<NAME>')\n", (64, 101), False, 'from distutils.core import setup\n')] |