code (stringlengths 20–1.05M) | apis (sequence) | extract_api (stringlengths 75–5.24M) |
---|---|---|
__author__ = 'brendan'
import main
import pandas as pd
import numpy as np
from datetime import datetime as dt
from matplotlib import pyplot as plt
import random
import itertools
import time
import dateutil
from datetime import timedelta
import Quandl
authtoken = '<PASSWORD>'
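# load the Commitments of Traders (COT) reports for WTI crude and copper and plot each set of series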
data = pd.read_csv('raw_data/COT_CRUDE.csv', index_col=0, parse_dates=True)
data_cop = pd.read_csv('raw_data/COT_COPPER.csv', index_col=0, parse_dates=True)
fig, ax = plt.subplots()
data.plot(ax=ax)
ax.set_title('Commitments of Traders in WTI')
fig2, ax2 = plt.subplots()
data_cop.plot(ax=ax2)
ax2.set_title('Commitments of Traders in Copper Grade #1')
plt.show()
| [
"matplotlib.pyplot.subplots",
"pandas.read_csv",
"matplotlib.pyplot.show"
] | [((286, 354), 'pandas.read_csv', 'pd.read_csv', (['"""raw_data/COT_CRUDE.csv"""'], {'index_col': '(0)', 'parse_dates': '(True)'}), "('raw_data/COT_CRUDE.csv', index_col=0, parse_dates=True)\n", (297, 354), True, 'import pandas as pd\n'), ((366, 435), 'pandas.read_csv', 'pd.read_csv', (['"""raw_data/COT_COPPER.csv"""'], {'index_col': '(0)', 'parse_dates': '(True)'}), "('raw_data/COT_COPPER.csv', index_col=0, parse_dates=True)\n", (377, 435), True, 'import pandas as pd\n'), ((446, 460), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (458, 460), True, 'from matplotlib import pyplot as plt\n'), ((537, 551), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (549, 551), True, 'from matplotlib import pyplot as plt\n'), ((633, 643), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (641, 643), True, 'from matplotlib import pyplot as plt\n')] |
# Author: <NAME>
from kernel.type import TVar, TFun, boolT
from kernel.term import Term, Var, Const, Abs
from logic.conv import Conv, then_conv, all_conv, arg_conv, binop_conv, rewr_conv
from logic.proofterm import ProofTerm, refl
"""Utility functions for logic."""
conj = Const("conj", TFun(boolT, boolT, boolT))
disj = Const("disj", TFun(boolT, boolT, boolT))
neg = Const("neg", TFun(boolT, boolT))
true = Const("true", boolT)
false = Const("false", boolT)
def is_conj(t):
"""Whether t is of the form A & B."""
return t.is_binop() and t.head == conj
def mk_conj(*args):
"""Construct the term s1 & ... & sn."""
if args:
assert isinstance(args[0], Term), "mk_conj: each argument must be a term"
if len(args) > 1:
return conj(args[0], mk_conj(*args[1:]))
else:
return args[0]
else:
return true
def strip_conj(t):
"""Given term of the form s1 & ... & sn, return the list
[s1, ..., sn].
"""
if is_conj(t):
return [t.arg1] + strip_conj(t.arg)
else:
return [t]
def is_disj(t):
"""Whether t is of the form A | B."""
return t.is_binop() and t.head == disj
def mk_disj(*args):
"""Construct the term s1 | ... | sn."""
if args:
assert isinstance(args[0], Term), "mk_disj: each argument must be a term"
if len(args) > 1:
return disj(args[0], mk_disj(*args[1:]))
else:
return args[0]
else:
return false
def strip_disj(t):
"""Given term of the form s1 | ... | sn, return the list
[s1, ..., sn].
"""
if is_disj(t):
return [t.arg1] + strip_disj(t.arg)
else:
return [t]
def is_neg(t):
"""Whether t is of the form ~ A."""
return t.is_comb() and t.fun == neg
def is_exists(t):
"""Whether t is of the form ?x. P x."""
return t.is_comb() and t.fun.is_const() and \
t.fun.name == "exists" and t.arg.is_abs()
def mk_exists(x, body):
"""Given a variable x and a term t possibly depending on x, return
the term ?x. t.
"""
exists_t = Const("exists", TFun(TFun(x.T, boolT), boolT))
return exists_t(Term.mk_abs(x, body))
def subst_norm(t, instsp):
"""Substitute using the given instantiation, then normalize with
respect to beta-conversion.
"""
tyinst, inst = instsp
return t.subst_type(tyinst).subst(inst).beta_norm()
def if_t(T):
return Const("IF", TFun(boolT, T, T, T))
def is_if(t):
"""Whether t is of the form if P then x else y."""
f, args = t.strip_comb()
return f.is_const_name("IF") and len(args) == 3
def mk_if(P, x, y):
"""Obtain the term if P then x else y."""
return if_t(x.get_type())(P, x, y)
def strip_all_implies(t, names):
"""Given a term of the form
!x_1 ... x_k. A_1 --> ... --> A_n --> C.
Return the triple ([v_1, ..., v_k], [A_1, ... A_n], C), where
v_1, ..., v_k are new variables with the given names, and
A_1, ..., A_n, C are the body of the input term, with bound variables
substituted for v_1, ..., v_k.
"""
if Term.is_all(t):
assert len(names) > 0, "strip_all_implies: not enough names input."
assert isinstance(names[0], str), "strip_all_implies: names must be strings."
v = Var(names[0], t.arg.var_T)
vars, As, C = strip_all_implies(t.arg.subst_bound(v), names[1:])
return ([v] + vars, As, C)
else:
assert len(names) == 0, "strip_all_implies: too many names input."
As, C = t.strip_implies()
return ([], As, C)
"""Normalization rules for logic."""
class norm_bool_expr(Conv):
"""Normalize a boolean expression."""
def get_proof_term(self, thy, t):
if is_neg(t):
if t.arg == true:
return rewr_conv("not_true").get_proof_term(thy, t)
elif t.arg == false:
return rewr_conv("not_false").get_proof_term(thy, t)
else:
return refl(t)
else:
return refl(t)
class norm_conj_assoc_clauses(Conv):
"""Normalize (A_1 & ... & A_n) & (B_1 & ... & B_n)."""
def get_proof_term(self, thy, t):
if is_conj(t.arg1):
return then_conv(
rewr_conv("conj_assoc", sym=True),
arg_conv(norm_conj_assoc_clauses())
).get_proof_term(thy, t)
else:
return all_conv().get_proof_term(thy, t)
class norm_conj_assoc(Conv):
"""Normalize conjunction with respect to associativity."""
def get_proof_term(self, thy, t):
if is_conj(t):
return then_conv(
binop_conv(norm_conj_assoc()),
norm_conj_assoc_clauses()
).get_proof_term(thy, t)
else:
return all_conv().get_proof_term(thy, t)
| [
"logic.conv.rewr_conv",
"kernel.term.Const",
"logic.conv.all_conv",
"kernel.type.TFun",
"kernel.term.Term.is_all",
"logic.proofterm.refl",
"kernel.term.Term.mk_abs",
"kernel.term.Var"
] | [((412, 432), 'kernel.term.Const', 'Const', (['"""true"""', 'boolT'], {}), "('true', boolT)\n", (417, 432), False, 'from kernel.term import Term, Var, Const, Abs\n'), ((441, 462), 'kernel.term.Const', 'Const', (['"""false"""', 'boolT'], {}), "('false', boolT)\n", (446, 462), False, 'from kernel.term import Term, Var, Const, Abs\n'), ((291, 316), 'kernel.type.TFun', 'TFun', (['boolT', 'boolT', 'boolT'], {}), '(boolT, boolT, boolT)\n', (295, 316), False, 'from kernel.type import TVar, TFun, boolT\n'), ((339, 364), 'kernel.type.TFun', 'TFun', (['boolT', 'boolT', 'boolT'], {}), '(boolT, boolT, boolT)\n', (343, 364), False, 'from kernel.type import TVar, TFun, boolT\n'), ((385, 403), 'kernel.type.TFun', 'TFun', (['boolT', 'boolT'], {}), '(boolT, boolT)\n', (389, 403), False, 'from kernel.type import TVar, TFun, boolT\n'), ((3084, 3098), 'kernel.term.Term.is_all', 'Term.is_all', (['t'], {}), '(t)\n', (3095, 3098), False, 'from kernel.term import Term, Var, Const, Abs\n'), ((2160, 2180), 'kernel.term.Term.mk_abs', 'Term.mk_abs', (['x', 'body'], {}), '(x, body)\n', (2171, 2180), False, 'from kernel.term import Term, Var, Const, Abs\n'), ((2439, 2459), 'kernel.type.TFun', 'TFun', (['boolT', 'T', 'T', 'T'], {}), '(boolT, T, T, T)\n', (2443, 2459), False, 'from kernel.type import TVar, TFun, boolT\n'), ((3274, 3300), 'kernel.term.Var', 'Var', (['names[0]', 't.arg.var_T'], {}), '(names[0], t.arg.var_T)\n', (3277, 3300), False, 'from kernel.term import Term, Var, Const, Abs\n'), ((2114, 2130), 'kernel.type.TFun', 'TFun', (['x.T', 'boolT'], {}), '(x.T, boolT)\n', (2118, 2130), False, 'from kernel.type import TVar, TFun, boolT\n'), ((4007, 4014), 'logic.proofterm.refl', 'refl', (['t'], {}), '(t)\n', (4011, 4014), False, 'from logic.proofterm import ProofTerm, refl\n'), ((3966, 3973), 'logic.proofterm.refl', 'refl', (['t'], {}), '(t)\n', (3970, 3973), False, 'from logic.proofterm import ProofTerm, refl\n'), ((4381, 4391), 'logic.conv.all_conv', 'all_conv', ([], {}), '()\n', (4389, 4391), False, 'from logic.conv import Conv, then_conv, all_conv, arg_conv, binop_conv, rewr_conv\n'), ((4758, 4768), 'logic.conv.all_conv', 'all_conv', ([], {}), '()\n', (4766, 4768), False, 'from logic.conv import Conv, then_conv, all_conv, arg_conv, binop_conv, rewr_conv\n'), ((3778, 3799), 'logic.conv.rewr_conv', 'rewr_conv', (['"""not_true"""'], {}), "('not_true')\n", (3787, 3799), False, 'from logic.conv import Conv, then_conv, all_conv, arg_conv, binop_conv, rewr_conv\n'), ((4224, 4257), 'logic.conv.rewr_conv', 'rewr_conv', (['"""conj_assoc"""'], {'sym': '(True)'}), "('conj_assoc', sym=True)\n", (4233, 4257), False, 'from logic.conv import Conv, then_conv, all_conv, arg_conv, binop_conv, rewr_conv\n'), ((3879, 3901), 'logic.conv.rewr_conv', 'rewr_conv', (['"""not_false"""'], {}), "('not_false')\n", (3888, 3901), False, 'from logic.conv import Conv, then_conv, all_conv, arg_conv, binop_conv, rewr_conv\n')] |
from zipfile import ZipFile
from os import path
from src.prefixes import SMPDB
from src.babel_utils import pull_via_urllib
def pull_smpdb():
dname = pull_via_urllib('http://smpdb.ca/downloads/','smpdb_pathways.csv.zip',decompress=False,subpath='SMPDB')
ddir = path.dirname(dname)
with ZipFile(dname, 'r') as zipObj:
zipObj.extractall(ddir)
def make_labels(inputfile,labelfile):
"""Get the SMPDB file. It's not good - there are \n and commas, and commas are also the delimiter. I mean, what?"""
with open(inputfile,'r') as inf, open(labelfile,'w') as outf:
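        # read and discard the header line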
h = inf.readline()
for line in inf:
if ',' not in line:
continue
if not line.startswith('SMP'):
continue
#print(line)
#my god what a stupid file. It's a csv, but there are commas in the data. Not only that
# but the last column is quoted, but the next to the last column (which also has commas) is not.
frontline = line.split('"')[0][:-1] #remove the last (quoted) column
x = frontline.strip().split(',')
ident = f'{SMPDB}:{x[0]}'
name = ','.join(x[2:]) #get the rest of the splits and put them back together.
if len(name) > 0:
outf.write(f'{ident}\t{name}\n')
| [
"os.path.dirname",
"src.babel_utils.pull_via_urllib",
"zipfile.ZipFile"
] | [((154, 264), 'src.babel_utils.pull_via_urllib', 'pull_via_urllib', (['"""http://smpdb.ca/downloads/"""', '"""smpdb_pathways.csv.zip"""'], {'decompress': '(False)', 'subpath': '"""SMPDB"""'}), "('http://smpdb.ca/downloads/', 'smpdb_pathways.csv.zip',\n decompress=False, subpath='SMPDB')\n", (169, 264), False, 'from src.babel_utils import pull_via_urllib\n'), ((269, 288), 'os.path.dirname', 'path.dirname', (['dname'], {}), '(dname)\n', (281, 288), False, 'from os import path\n'), ((298, 317), 'zipfile.ZipFile', 'ZipFile', (['dname', '"""r"""'], {}), "(dname, 'r')\n", (305, 317), False, 'from zipfile import ZipFile\n')] |
import datetime
import json
from typing import List, Dict, Union
import pyrfc3339
import responses
import unittest
import urllib
from selfhost_client import (
TimeseriesClient,
TimeseriesType,
TimeseriesDataPointType,
TimeseriesDataType,
TimeseriesDataPointResponse, TimeseriesDataResponse
)
class TestPCTTimeseriesClient(unittest.TestCase):
def setUp(self) -> None:
self.base_url: str = 'http://example.com'
self.username: str = 'test'
self.password: str = '<PASSWORD>'
self.client: TimeseriesClient = TimeseriesClient(
base_url=self.base_url,
username=self.username,
password=self.password
)
@responses.activate
def test_get_timeseries(self) -> None:
mock_response: List[TimeseriesType] = [
{
'created_by': '1740f1e4-d2c6-4943-9976-9ff10eab90b2',
'lower_bound': 0,
'name': 'new timeseries',
'si_unit': 'C',
'tags': [
'tag1',
'tag2'
],
'thing_uuid': 'e21ae595-15a5-4f11-8992-9d33600cc1ee',
'upper_bound': 0,
'uuid': 'a21ae595-15a5-4f11-8992-9d33600cc1ee'
}
]
with self.subTest('call successful with complete parameter list'):
responses.add(
responses.GET,
url=f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}',
json=mock_response,
status=200
)
params: Dict[str, Union[int, List[str]]] = {
'limit': 20,
'offset': 0,
'tags': ['tag', 'tag2']
}
res: List[TimeseriesType] = self.client.get_timeseries(**params)
self.assertEqual(res, mock_response)
self.assertEqual(len(responses.calls), 1)
self.assertEqual(
responses.calls[0].request.url,
f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}'
f'?{urllib.parse.urlencode(params, doseq=True)}'
)
self.assertEqual(responses.calls[0].request.params.get('limit'), str(params['limit']))
self.assertEqual(responses.calls[0].request.params.get('offset'), str(params['offset']))
self.assertEqual(responses.calls[0].request.params.get('tags'), params['tags'])
@responses.activate
def test_create_timeseries(self) -> None:
mock_response: TimeseriesType = {
'created_by': '1740f1e4-d2c6-4943-9976-9ff10eab90b2',
'lower_bound': 0,
'name': 'new timeseries',
'si_unit': 'C',
'tags': [
'tag1',
'tag2'
],
'thing_uuid': 'e21ae595-15a5-4f11-8992-9d33600cc1ee',
'upper_bound': 0,
'uuid': 'a21ae595-15a5-4f11-8992-9d33600cc1ee'
}
with self.subTest('call successful with complete parameter list'):
responses.add(
responses.POST,
url=f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}',
json=mock_response,
status=201
)
params: Dict[str, Union[int, str, List[str]]] = {
'name': 'new timeseries',
'si_unit': 'C',
'thing_uuid': 'e21ae595-15a5-4f11-8992-9d33600cc1ee',
'lower_bound': 0,
'upper_bound': 0,
'tags': [
'tag1',
'tag2'
]
}
res: TimeseriesType = self.client.create_timeseries(**params)
self.assertEqual(res, mock_response)
self.assertEqual(len(responses.calls), 1)
self.assertEqual(
responses.calls[0].request.url,
f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}'
)
self.assertEqual(json.loads(responses.calls[0].request.body.decode('utf-8')), params)
@responses.activate
def test_get_timeseries_by_uuid(self) -> None:
mock_response: TimeseriesType = {
'created_by': '1740f1e4-d2c6-4943-9976-9ff10eab90b2',
'lower_bound': 0,
'name': 'new timeseries',
'si_unit': 'C',
'tags': [
'tag1',
'tag2'
],
'thing_uuid': 'e21ae595-15a5-4f11-8992-9d33600cc1ee',
'upper_bound': 0,
'uuid': 'a21ae595-15a5-4f11-8992-9d33600cc1ee'
}
timeseries_uuid: str = '5ce5d3cd-ff99-4342-a19e-fdb1b5805178'
with self.subTest('call successful with complete parameter list'):
responses.add(
responses.GET,
url=f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}',
json=mock_response,
status=200
)
res: TimeseriesType = self.client.get_timeseries_by_uuid(timeseries_uuid)
self.assertEqual(res, mock_response)
self.assertEqual(len(responses.calls), 1)
self.assertEqual(
responses.calls[0].request.url,
f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}'
)
@responses.activate
def test_update_timeseries(self) -> None:
timeseries_uuid: str = '7e7823cc-44fa-403d-853f-d5ce48a002e4'
with self.subTest('call successful with complete parameter list'):
responses.add(
responses.PUT,
url=f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}',
status=204
)
params: Dict[str, Union[int, str, List[str]]] = {
'name': 'updated timeseries',
'si_unit': 'C',
'thing_uuid': '7e7823cc-44fa-403d-853f-d5ce48a002e4',
'lower_bound': 0,
'upper_bound': 0,
'tags': [
'tag1',
'tag2'
]
}
self.client.update_timeseries(timeseries_uuid, **params)
self.assertEqual(len(responses.calls), 1)
self.assertEqual(
responses.calls[0].request.url,
f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}'
)
self.assertEqual(json.loads(responses.calls[0].request.body.decode('utf-8')), params)
@responses.activate
def test_delete_timeseries(self) -> None:
timeseries_uuid: str = '7e7823cc-44fa-403d-853f-d5ce48a002e4'
with self.subTest('call successful with complete parameter list'):
responses.add(
responses.DELETE,
url=f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}',
status=204
)
self.client.delete_timeseries(timeseries_uuid)
self.assertEqual(len(responses.calls), 1)
self.assertEqual(
responses.calls[0].request.url,
f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}'
)
@responses.activate
def test_get_timeseries_data(self) -> None:
mock_response: List[TimeseriesDataPointResponse] = [
{
'ts': '2022-01-14T12:43:44.147Z',
'v': 3.14
}
]
timeseries_uuid: str = 'Ze7823cc-44fa-403d-853f-d5ce48a002e4'
with self.subTest('call successful with complete parameter list'):
responses.add(
responses.GET,
url=f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}'
f'/data',
json=mock_response,
status=200
)
params: Dict[str, Union[str, int, datetime.datetime]] = {
'start': pyrfc3339.parse('2022-01-14T12:43:44.147Z'),
'end': pyrfc3339.parse('2022-01-14T12:43:44.147Z'),
'unit': 'C',
'ge': 0,
'le': 0,
'precision': 'second',
'aggregate': 'avg',
'timezone': 'UTC'
}
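            # the datetime parameters are expected to appear in the query string as ISO 8601 strings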
expected_query_params: Dict[str, Union[str, int]] = {
**params,
'start': params['start'].isoformat(),
'end': params['end'].isoformat()
}
res: List[TimeseriesDataPointType] = self.client.get_timeseries_data(timeseries_uuid, **params)
self.assertEqual(len(responses.calls), 1)
self.assertEqual(
responses.calls[0].request.url,
f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}/data'
f'?{urllib.parse.urlencode(expected_query_params)}'
)
self.assertEqual(responses.calls[0].request.params.get('start'), params['start'].isoformat())
self.assertEqual(responses.calls[0].request.params.get('end'), params['end'].isoformat())
self.assertEqual(responses.calls[0].request.params.get('unit'), params['unit'])
self.assertEqual(responses.calls[0].request.params.get('ge'), str(params['ge']))
self.assertEqual(responses.calls[0].request.params.get('le'), str(params['le']))
self.assertEqual(responses.calls[0].request.params.get('precision'), params['precision'])
self.assertEqual(responses.calls[0].request.params.get('aggregate'), params['aggregate'])
self.assertEqual(responses.calls[0].request.params.get('timezone'), params['timezone'])
self.assertEqual(res[0]['v'], mock_response[0]['v'])
self.assertEqual(
res[0]['ts'],
pyrfc3339.parse('2022-01-14T12:43:44.147Z')
)
@responses.activate
def test_create_timeseries_data(self) -> None:
timeseries_uuid: str = 'Ze7823cc-44fa-403d-853f-d5ce48a002e4'
with self.subTest('call successful with complete parameter list'):
responses.add(
responses.POST,
url=f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}'
f'/data',
status=201
)
unit: str = 'C'
body: List[TimeseriesDataPointType] = [
{'ts': pyrfc3339.parse('2022-01-14T12:52:04.147Z'), 'v': 3.01},
{'ts': pyrfc3339.parse('2022-01-14T12:52:04.147Z'), 'v': 3.99}
]
self.client.create_timeseries_data(timeseries_uuid, body, unit)
self.assertEqual(len(responses.calls), 1)
self.assertEqual(
responses.calls[0].request.url,
f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}/data'
f'?{urllib.parse.urlencode({"unit": unit})}'
)
self.assertEqual(responses.calls[0].request.params.get('unit'), unit)
sent_body: List[TimeseriesDataPointResponse] = json.loads(responses.calls[0].request.body.decode('utf-8'))
self.assertEqual(sent_body[0]['ts'], body[0]['ts'].isoformat())
self.assertEqual(sent_body[0]['v'], body[0]['v'])
self.assertEqual(sent_body[1]['ts'], body[1]['ts'].isoformat())
self.assertEqual(sent_body[1]['v'], body[1]['v'])
@responses.activate
def test_delete_timeseries_data(self) -> None:
timeseries_uuid: str = '7e7823cc-44fa-403d-853f-d5ce48a002e4'
with self.subTest('call successful with complete parameter list'):
responses.add(
responses.DELETE,
url=f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}'
f'/data',
status=204
)
params: Dict[str, Union[str, int, datetime.datetime]] = {
'start': pyrfc3339.parse('2022-01-14T12:52:04.147Z'),
'end': pyrfc3339.parse('2022-01-14T12:52:04.147Z'),
'ge': 0,
'le': 0
}
expected_query_params: Dict[str, Union[str, int]] = {
**params,
'start': params['start'].isoformat(),
'end': params['end'].isoformat()
}
self.client.delete_timeseries_data(timeseries_uuid, **params)
self.assertEqual(len(responses.calls), 1)
self.assertEqual(
responses.calls[0].request.url,
f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}/data'
f'?{urllib.parse.urlencode(expected_query_params)}'
)
self.assertEqual(responses.calls[0].request.params.get('start'), params['start'].isoformat())
self.assertEqual(responses.calls[0].request.params.get('end'), params['end'].isoformat())
self.assertEqual(responses.calls[0].request.params.get('ge'), str(params['ge']))
self.assertEqual(responses.calls[0].request.params.get('le'), str(params['le']))
@responses.activate
def test_get_multiple_timeseries_data(self) -> None:
mock_response: List[TimeseriesDataResponse] = [
{
'data': [{
'ts': '2022-01-14T12:43:44.147Z',
'v': 3.14
}],
'uuid': 'ze7823cc-44fa-403d-853f-d5ce48a002e4'
}
]
with self.subTest('call successful with complete parameter list'):
responses.add(
responses.GET,
url=f'{self.base_url}/{self.client._api_version}/tsquery',
json=mock_response,
status=200
)
params: Dict[str, Union[str, int, List[str], datetime.datetime]] = {
'uuids': ['be7823cc-44fa-403d-853f-d5ce48a002e4', 'ze7823cc-44fa-403d-853f-d5ce48a002e4'],
'start': pyrfc3339.parse('2022-01-14T12:43:44.147Z'),
'end': pyrfc3339.parse('2022-01-14T12:43:44.147Z'),
'unit': 'C',
'ge': 0,
'le': 0,
'precision': 'second',
'aggregate': 'avg',
'timezone': 'UTC'
}
expected_query_params: Dict[str, Union[str, int, List[str]]] = {
**params,
'start': params['start'].isoformat(),
'end': params['end'].isoformat()
}
res: List[TimeseriesDataType] = self.client.get_multiple_timeseries_data(**params)
self.assertEqual(len(responses.calls), 1)
self.assertEqual(
responses.calls[0].request.url,
f'{self.base_url}/{self.client._api_version}/tsquery'
f'?{urllib.parse.urlencode(expected_query_params, doseq=True)}'
)
self.assertEqual(responses.calls[0].request.params.get('uuids'), params['uuids'])
self.assertEqual(responses.calls[0].request.params.get('start'), params['start'].isoformat())
self.assertEqual(responses.calls[0].request.params.get('end'), params['end'].isoformat())
self.assertEqual(responses.calls[0].request.params.get('unit'), params['unit'])
self.assertEqual(responses.calls[0].request.params.get('ge'), str(params['ge']))
self.assertEqual(responses.calls[0].request.params.get('le'), str(params['le']))
self.assertEqual(responses.calls[0].request.params.get('precision'), params['precision'])
self.assertEqual(responses.calls[0].request.params.get('aggregate'), params['aggregate'])
self.assertEqual(responses.calls[0].request.params.get('timezone'), params['timezone'])
self.assertEqual(res[0]['data'][0]['v'], mock_response[0]['data'][0]['v'])
self.assertEqual(
res[0]['data'][0]['ts'],
pyrfc3339.parse('2022-01-14T12:43:44.147Z')
)
| [
"urllib.parse.urlencode",
"selfhost_client.TimeseriesClient",
"responses.add",
"pyrfc3339.parse"
] | [((563, 656), 'selfhost_client.TimeseriesClient', 'TimeseriesClient', ([], {'base_url': 'self.base_url', 'username': 'self.username', 'password': 'self.password'}), '(base_url=self.base_url, username=self.username, password=\n self.password)\n', (579, 656), False, 'from selfhost_client import TimeseriesClient, TimeseriesType, TimeseriesDataPointType, TimeseriesDataType, TimeseriesDataPointResponse, TimeseriesDataResponse\n'), ((1384, 1540), 'responses.add', 'responses.add', (['responses.GET'], {'url': 'f"""{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}"""', 'json': 'mock_response', 'status': '(200)'}), "(responses.GET, url=\n f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}'\n , json=mock_response, status=200)\n", (1397, 1540), False, 'import responses\n'), ((3119, 3276), 'responses.add', 'responses.add', (['responses.POST'], {'url': 'f"""{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}"""', 'json': 'mock_response', 'status': '(201)'}), "(responses.POST, url=\n f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}'\n , json=mock_response, status=201)\n", (3132, 3276), False, 'import responses\n'), ((4886, 5060), 'responses.add', 'responses.add', (['responses.GET'], {'url': 'f"""{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}"""', 'json': 'mock_response', 'status': '(200)'}), "(responses.GET, url=\n f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}'\n , json=mock_response, status=200)\n", (4899, 5060), False, 'import responses\n'), ((5756, 5910), 'responses.add', 'responses.add', (['responses.PUT'], {'url': 'f"""{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}"""', 'status': '(204)'}), "(responses.PUT, url=\n f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}'\n , status=204)\n", (5769, 5910), False, 'import responses\n'), ((7013, 7170), 'responses.add', 'responses.add', (['responses.DELETE'], {'url': 'f"""{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}"""', 'status': '(204)'}), "(responses.DELETE, url=\n f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}'\n , status=204)\n", (7026, 7170), False, 'import responses\n'), ((7951, 8130), 'responses.add', 'responses.add', (['responses.GET'], {'url': 'f"""{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}/data"""', 'json': 'mock_response', 'status': '(200)'}), "(responses.GET, url=\n f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}/data'\n , json=mock_response, status=200)\n", (7964, 8130), False, 'import responses\n'), ((10511, 10671), 'responses.add', 'responses.add', (['responses.POST'], {'url': 'f"""{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}/data"""', 'status': '(201)'}), "(responses.POST, url=\n f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}/data'\n , status=201)\n", (10524, 10671), False, 'import responses\n'), ((12119, 12281), 'responses.add', 'responses.add', (['responses.DELETE'], {'url': 'f"""{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}/data"""', 'status': '(204)'}), "(responses.DELETE, url=\n 
f'{self.base_url}/{self.client._api_version}/{self.client._timeseries_api_path}/{timeseries_uuid}/data'\n , status=204)\n", (12132, 12281), False, 'import responses\n'), ((14102, 14231), 'responses.add', 'responses.add', (['responses.GET'], {'url': 'f"""{self.base_url}/{self.client._api_version}/tsquery"""', 'json': 'mock_response', 'status': '(200)'}), "(responses.GET, url=\n f'{self.base_url}/{self.client._api_version}/tsquery', json=\n mock_response, status=200)\n", (14115, 14231), False, 'import responses\n'), ((8319, 8362), 'pyrfc3339.parse', 'pyrfc3339.parse', (['"""2022-01-14T12:43:44.147Z"""'], {}), "('2022-01-14T12:43:44.147Z')\n", (8334, 8362), False, 'import pyrfc3339\n'), ((8387, 8430), 'pyrfc3339.parse', 'pyrfc3339.parse', (['"""2022-01-14T12:43:44.147Z"""'], {}), "('2022-01-14T12:43:44.147Z')\n", (8402, 8430), False, 'import pyrfc3339\n'), ((10220, 10263), 'pyrfc3339.parse', 'pyrfc3339.parse', (['"""2022-01-14T12:43:44.147Z"""'], {}), "('2022-01-14T12:43:44.147Z')\n", (10235, 10263), False, 'import pyrfc3339\n'), ((12454, 12497), 'pyrfc3339.parse', 'pyrfc3339.parse', (['"""2022-01-14T12:52:04.147Z"""'], {}), "('2022-01-14T12:52:04.147Z')\n", (12469, 12497), False, 'import pyrfc3339\n'), ((12522, 12565), 'pyrfc3339.parse', 'pyrfc3339.parse', (['"""2022-01-14T12:52:04.147Z"""'], {}), "('2022-01-14T12:52:04.147Z')\n", (12537, 12565), False, 'import pyrfc3339\n'), ((14514, 14557), 'pyrfc3339.parse', 'pyrfc3339.parse', (['"""2022-01-14T12:43:44.147Z"""'], {}), "('2022-01-14T12:43:44.147Z')\n", (14529, 14557), False, 'import pyrfc3339\n'), ((14582, 14625), 'pyrfc3339.parse', 'pyrfc3339.parse', (['"""2022-01-14T12:43:44.147Z"""'], {}), "('2022-01-14T12:43:44.147Z')\n", (14597, 14625), False, 'import pyrfc3339\n'), ((16503, 16546), 'pyrfc3339.parse', 'pyrfc3339.parse', (['"""2022-01-14T12:43:44.147Z"""'], {}), "('2022-01-14T12:43:44.147Z')\n", (16518, 16546), False, 'import pyrfc3339\n'), ((10853, 10896), 'pyrfc3339.parse', 'pyrfc3339.parse', (['"""2022-01-14T12:52:04.147Z"""'], {}), "('2022-01-14T12:52:04.147Z')\n", (10868, 10896), False, 'import pyrfc3339\n'), ((10933, 10976), 'pyrfc3339.parse', 'pyrfc3339.parse', (['"""2022-01-14T12:52:04.147Z"""'], {}), "('2022-01-14T12:52:04.147Z')\n", (10948, 10976), False, 'import pyrfc3339\n'), ((2156, 2198), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (['params'], {'doseq': '(True)'}), '(params, doseq=True)\n', (2178, 2198), False, 'import urllib\n'), ((9226, 9271), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (['expected_query_params'], {}), '(expected_query_params)\n', (9248, 9271), False, 'import urllib\n'), ((11354, 11392), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (["{'unit': unit}"], {}), "({'unit': unit})\n", (11376, 11392), False, 'import urllib\n'), ((13189, 13234), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (['expected_query_params'], {}), '(expected_query_params)\n', (13211, 13234), False, 'import urllib\n'), ((15370, 15427), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (['expected_query_params'], {'doseq': '(True)'}), '(expected_query_params, doseq=True)\n', (15392, 15427), False, 'import urllib\n')] |
import argparse
import copy
import inspect
import itertools
import json
import math
import subprocess
import sys
from pathlib import Path
from typing import List, Any
import transformers.optimization as module_optim
import model.model as module_arch
class Config:
def __init__(self, config_dir: Path) -> None:
config_dir.mkdir(exist_ok=True)
self.config_dir = config_dir
self.log = []
def write(self, **config) -> None:
config_path = self.config_dir / f'{config["name"]}.json'
if config_path in self.log:
return
with config_path.open('w') as f:
json.dump(config, f, indent=2, ensure_ascii=False)
self.log.append(config_path)
print(config_path)
def main() -> None:
all_models = [m[0] for m in inspect.getmembers(module_arch, inspect.isclass)
if m[1].__module__ == module_arch.__name__]
all_lr_schedulers = [m[0][4:] for m in inspect.getmembers(module_optim, inspect.isfunction)
if m[1].__module__ == module_optim.__name__ and m[0].startswith('get_')]
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=(lambda p: Path(p)), default='config',
help='path to output directory')
parser.add_argument('-d', '--dataset', type=(lambda p: Path(p)),
help='path to dataset directory')
parser.add_argument('-m', '--model', choices=all_models, default=all_models, nargs='*',
help='model name')
parser.add_argument('-e', '--epoch', type=int, default=[4], nargs='*',
help='number of training epochs')
parser.add_argument('-b', '--batch-size', type=int, default=16,
help='number of batch size')
parser.add_argument('--max-bpg', type=int, default=None,
help='max batch size per GPU')
parser.add_argument('--eval-batch-size', type=int, default=None,
help='number of batch size for evaluation (default: same as that of training)')
parser.add_argument('--coreference', '--coref', '--cr', action='store_true', default=False,
help='perform coreference resolution')
parser.add_argument('--bridging', '--brg', '--bar', action='store_true', default=False,
help='perform bridging anaphora resolution')
parser.add_argument('--cases', default='ガ ヲ ニ ガ2'.split(), nargs='*',
help='cases to perform PAS analysis (default: ガ ヲ ニ ガ2)')
parser.add_argument('--dropout', type=float, default=0.0,
help='dropout ratio')
parser.add_argument('--lr', type=float, default=5e-5,
help='learning rate')
parser.add_argument('--lr-schedule', choices=all_lr_schedulers, type=str, default='linear_schedule_with_warmup',
help='lr scheduler')
parser.add_argument('--warmup-proportion', type=float, default=0.1,
help='Proportion of training to perform linear learning rate warmup for')
parser.add_argument('--warmup-steps', type=int, default=None,
help='Linear warmup over warmup_steps.')
parser.add_argument('--additional-name', '--name', type=str, default=None,
help='additional config file name')
parser.add_argument('--gpus', type=int, default=8,
help='number of gpus to use')
parser.add_argument('--save-start-epoch', type=int, default=1,
help='you can skip saving of initial checkpoints, which reduces writing overhead')
parser.add_argument('--corpus', choices=['kwdlc', 'kc', 'fuman'], default=['kwdlc', 'kc'], nargs='*',
help='corpus to use in training')
parser.add_argument('--train-target', choices=['overt', 'dep', 'zero'], default=['overt', 'dep', 'zero'], nargs='*',
help='dependency type to train')
parser.add_argument('--pas-target', choices=['none', 'pred', 'noun', 'all'], default=['pred'], nargs='*',
help='PAS analysis target (pred: verbal predicates, noun: nominal predicates, all: both)')
parser.add_argument('--debug', action='store_true', default=False,
help='debug mode')
args = parser.parse_args()
config = Config(args.config)
data_root: Path = args.dataset.resolve()
with data_root.joinpath('config.json').open() as f:
dataset_config = json.load(f)
exophors = dataset_config['exophors']
# cases: List[str] = args.case_string.split(',') if args.case_string else []
msg = '"ノ" found in case string. If you want to perform bridging anaphora resolution, specify "--bridging" option'
assert 'ノ' not in args.cases, msg
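    # expand each --pas-target choice into the list of targets it implies:
    # 'pred' -> ['pred'], 'noun' -> ['noun'], 'all' -> ['pred', 'noun'], 'none' -> []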
pas_targets_list = [['pred'] * (t in ('pred', 'all')) + ['noun'] * (t in ('noun', 'all')) for t in args.pas_target]
for model, n_epoch, pas_targets in itertools.product(args.model, args.epoch, pas_targets_list):
items: List[Any] = [model]
corpus2abbr = {'kwdlc': 'w', 'kc': 'n', 'fuman': 'f'}
items += [''.join(corpus2abbr[c] for c in args.corpus), f'{n_epoch}e', dataset_config['bert_name']]
if pas_targets:
items.append(''.join(tgt[0] for tgt in ('overt', 'dep', 'zero') if tgt in args.train_target))
if 'pred' in pas_targets:
items.append('vpa')
if 'noun' in pas_targets:
items.append('npa')
if args.bridging:
items.append('bar')
if args.coreference:
items.append('cr')
if args.debug:
items.append('debug')
if args.additional_name:
items.append(args.additional_name)
name = '-'.join(str(x) for x in items)
cases = args.cases if pas_targets else []
num_train_examples = 0
if 'kwdlc' in args.corpus:
num_train_examples += dataset_config['num_examples']['kwdlc']['train']
if 'kc' in args.corpus:
num_train_examples += dataset_config['num_examples']['kc']['train']
if 'fuman' in args.corpus:
num_train_examples += dataset_config['num_examples']['fuman']['train']
if model == 'CommonsenseModel':
num_train_examples += dataset_config['num_examples']['commonsense']['train']
arch = {
'type': model,
'args': {
'bert_model': dataset_config['bert_path'],
'vocab_size': dataset_config['vocab_size'] + len(exophors) + 1 + int(args.coreference),
'dropout': args.dropout,
'num_case': len(cases) + int(args.bridging),
'coreference': args.coreference,
},
}
dataset = {
'type': 'PASDataset',
'args': {
'path': None,
'gold_path': None,
'cases': cases,
'exophors': exophors,
'coreference': args.coreference,
'bridging': args.bridging,
'max_seq_length': dataset_config['max_seq_length'],
'bert_path': dataset_config['bert_path'],
'training': None,
'kc': None,
'train_targets': args.train_target,
'pas_targets': pas_targets,
'n_jobs': -1 if args.debug is False else 0,
},
}
train_datasets = {}
valid_datasets = {}
test_datasets = {}
for corpus in args.corpus:
train_dataset = copy.deepcopy(dataset)
train_dataset['args']['path'] = str(data_root / corpus / 'train')
train_dataset['args']['training'] = True
train_dataset['args']['kc'] = (corpus == 'kc')
train_datasets[corpus] = train_dataset
valid_dataset = copy.deepcopy(dataset)
valid_dataset['args']['path'] = str(data_root / corpus / 'valid')
valid_dataset['args']['gold_path'] = str(data_root / corpus / 'valid_gold')
valid_dataset['args']['training'] = False
valid_dataset['args']['kc'] = (corpus == 'kc')
valid_datasets[corpus] = valid_dataset
test_dataset = copy.deepcopy(dataset)
test_dataset['args']['path'] = str(data_root / corpus / 'test')
test_dataset['args']['gold_path'] = str(data_root / corpus / 'test_gold')
test_dataset['args']['training'] = False
test_dataset['args']['kc'] = (corpus == 'kc')
test_datasets[corpus] = test_dataset
if model == 'CommonsenseModel':
commonsense_dataset = {
'type': 'CommonsenseDataset',
'args': {
'path': None,
'max_seq_length': dataset_config['max_seq_length'],
'num_special_tokens': len(exophors) + 1 + int(args.coreference),
'bert_model': dataset_config['bert_path'],
},
}
train_commonsense_dataset = copy.deepcopy(commonsense_dataset)
train_commonsense_dataset['args']['path'] = str(data_root / 'commonsense' / 'train.pkl')
train_datasets['commonsense'] = train_commonsense_dataset
valid_commonsense_dataset = copy.deepcopy(commonsense_dataset)
valid_commonsense_dataset['args']['path'] = str(data_root / 'commonsense' / 'valid.pkl')
valid_datasets['commonsense'] = valid_commonsense_dataset
test_commonsense_dataset = copy.deepcopy(commonsense_dataset)
test_commonsense_dataset['args']['path'] = str(data_root / 'commonsense' / 'test.pkl')
test_datasets['commonsense'] = test_commonsense_dataset
data_loader = {
'type': 'PASDataLoader',
'args': {
'batch_size': args.batch_size,
'shuffle': None,
'num_workers': 0 if args.debug else 4,
'pin_memory': True,
},
}
data_loaders = {}
train_data_loader = copy.deepcopy(data_loader)
train_data_loader['args']['shuffle'] = (not args.debug)
data_loaders['train'] = train_data_loader
valid_data_loader = copy.deepcopy(data_loader)
valid_data_loader['args']['batch_size'] = args.eval_batch_size if args.eval_batch_size else args.batch_size
valid_data_loader['args']['shuffle'] = False
data_loaders['valid'] = valid_data_loader
data_loaders['test'] = copy.deepcopy(valid_data_loader)
optimizer = {
'type': 'AdamW',
'args': {
'lr': args.lr,
'eps': 1e-8,
'weight_decay': 0.01,
},
}
metrics = []
if pas_targets:
if 'ガ' in cases:
metrics.append('case_analysis_f1_ga')
metrics.append('zero_anaphora_f1_ga')
if 'ヲ' in cases:
metrics.append('case_analysis_f1_wo')
metrics.append('zero_anaphora_f1_wo')
if 'ニ' in cases:
metrics.append('case_analysis_f1_ni')
metrics.append('zero_anaphora_f1_ni')
if 'ガ2' in cases:
metrics.append('case_analysis_f1_ga2')
metrics.append('zero_anaphora_f1_ga2')
if {'ガ', 'ヲ', 'ニ', 'ガ2'} & set(cases):
metrics += [
'case_analysis_f1',
'zero_anaphora_f1_inter',
'zero_anaphora_f1_intra',
'zero_anaphora_f1_exophora',
'zero_anaphora_f1',
'pas_analysis_f1',
]
if args.coreference:
metrics.append('coreference_f1')
if args.bridging:
metrics.append('bridging_anaphora_f1')
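        # total number of optimization steps over the whole training run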
t_total = math.ceil(num_train_examples / args.batch_size) * n_epoch
warmup_steps = args.warmup_steps if args.warmup_steps is not None else t_total * args.warmup_proportion
lr_scheduler = {'type': 'get_' + args.lr_schedule}
if args.lr_schedule == 'constant_schedule':
lr_scheduler['args'] = {}
elif args.lr_schedule == 'constant_schedule_with_warmup':
lr_scheduler['args'] = {'num_warmup_steps': warmup_steps}
elif args.lr_schedule in ('linear_schedule_with_warmup',
'cosine_schedule_with_warmup',
'cosine_with_hard_restarts_schedule_with_warmup'):
lr_scheduler['args'] = {'num_warmup_steps': warmup_steps, 'num_training_steps': t_total}
else:
raise ValueError(f'unknown lr schedule: {args.lr_schedule}')
mnt_mode = 'max'
mnt_metric = 'val_all_'
if 'pas_analysis_f1' in metrics:
mnt_metric += 'pas_analysis_f1'
elif 'coreference_f1' in metrics:
mnt_metric += 'coreference_f1'
elif 'bridging_anaphora_f1' in metrics:
mnt_metric += 'bridging_anaphora_f1'
else:
raise ValueError('no metric to evaluate')
trainer = {
'epochs': n_epoch,
'batch_size': args.batch_size,
'max_bpg': args.max_bpg if args.max_bpg is not None else args.batch_size,
'save_dir': 'result/',
'save_start_epoch': args.save_start_epoch,
'verbosity': 2 if args.debug else 1, # 0: WARNING, 1: INFO, 2: DEBUG
'monitor': f'{mnt_mode} {mnt_metric}',
'early_stop': 10,
}
config.write(
name=name,
commit=subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip(),
args=' '.join(sys.argv),
n_gpu=args.gpus,
arch=arch,
train_datasets=train_datasets,
valid_datasets=valid_datasets,
test_datasets=test_datasets,
data_loaders=data_loaders,
optimizer=optimizer,
metrics=metrics,
lr_scheduler=lr_scheduler,
trainer=trainer,
)
if __name__ == '__main__':
main()
| [
"subprocess.check_output",
"inspect.getmembers",
"math.ceil",
"argparse.ArgumentParser",
"pathlib.Path",
"itertools.product",
"copy.deepcopy",
"json.load",
"json.dump"
] | [((1117, 1142), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1140, 1142), False, 'import argparse\n'), ((4989, 5048), 'itertools.product', 'itertools.product', (['args.model', 'args.epoch', 'pas_targets_list'], {}), '(args.model, args.epoch, pas_targets_list)\n', (5006, 5048), False, 'import itertools\n'), ((4536, 4548), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4545, 4548), False, 'import json\n'), ((10128, 10154), 'copy.deepcopy', 'copy.deepcopy', (['data_loader'], {}), '(data_loader)\n', (10141, 10154), False, 'import copy\n'), ((10298, 10324), 'copy.deepcopy', 'copy.deepcopy', (['data_loader'], {}), '(data_loader)\n', (10311, 10324), False, 'import copy\n'), ((10575, 10607), 'copy.deepcopy', 'copy.deepcopy', (['valid_data_loader'], {}), '(valid_data_loader)\n', (10588, 10607), False, 'import copy\n'), ((630, 680), 'json.dump', 'json.dump', (['config', 'f'], {'indent': '(2)', 'ensure_ascii': '(False)'}), '(config, f, indent=2, ensure_ascii=False)\n', (639, 680), False, 'import json\n'), ((799, 847), 'inspect.getmembers', 'inspect.getmembers', (['module_arch', 'inspect.isclass'], {}), '(module_arch, inspect.isclass)\n', (817, 847), False, 'import inspect\n'), ((953, 1005), 'inspect.getmembers', 'inspect.getmembers', (['module_optim', 'inspect.isfunction'], {}), '(module_optim, inspect.isfunction)\n', (971, 1005), False, 'import inspect\n'), ((7604, 7626), 'copy.deepcopy', 'copy.deepcopy', (['dataset'], {}), '(dataset)\n', (7617, 7626), False, 'import copy\n'), ((7897, 7919), 'copy.deepcopy', 'copy.deepcopy', (['dataset'], {}), '(dataset)\n', (7910, 7919), False, 'import copy\n'), ((8278, 8300), 'copy.deepcopy', 'copy.deepcopy', (['dataset'], {}), '(dataset)\n', (8291, 8300), False, 'import copy\n'), ((9099, 9133), 'copy.deepcopy', 'copy.deepcopy', (['commonsense_dataset'], {}), '(commonsense_dataset)\n', (9112, 9133), False, 'import copy\n'), ((9346, 9380), 'copy.deepcopy', 'copy.deepcopy', (['commonsense_dataset'], {}), '(commonsense_dataset)\n', (9359, 9380), False, 'import copy\n'), ((9592, 9626), 'copy.deepcopy', 'copy.deepcopy', (['commonsense_dataset'], {}), '(commonsense_dataset)\n', (9605, 9626), False, 'import copy\n'), ((11930, 11977), 'math.ceil', 'math.ceil', (['(num_train_examples / args.batch_size)'], {}), '(num_train_examples / args.batch_size)\n', (11939, 11977), False, 'import math\n'), ((1201, 1208), 'pathlib.Path', 'Path', (['p'], {}), '(p)\n', (1205, 1208), False, 'from pathlib import Path\n'), ((1345, 1352), 'pathlib.Path', 'Path', (['p'], {}), '(p)\n', (1349, 1352), False, 'from pathlib import Path\n'), ((13689, 13742), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'rev-parse', 'HEAD']"], {}), "(['git', 'rev-parse', 'HEAD'])\n", (13712, 13742), False, 'import subprocess\n')] |
import unittest
import pandas as pd
class TestPandas(unittest.TestCase):
def test_read_csv(self):
data = pd.read_csv("/input/tests/data/train.csv")
self.assertEqual(14915, data.size)
def test_read_feather(self):
data = pd.read_feather("/input/tests/data/feather-0_3_1.feather")
self.assertEqual(10, data.size)
| [
"pandas.read_feather",
"pandas.read_csv"
] | [((123, 165), 'pandas.read_csv', 'pd.read_csv', (['"""/input/tests/data/train.csv"""'], {}), "('/input/tests/data/train.csv')\n", (134, 165), True, 'import pandas as pd\n'), ((259, 317), 'pandas.read_feather', 'pd.read_feather', (['"""/input/tests/data/feather-0_3_1.feather"""'], {}), "('/input/tests/data/feather-0_3_1.feather')\n", (274, 317), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 6 13:04:33 2018
@author: Adam
"""
import numpy as np
from numpy import *
import matplotlib.pyplot as plt
import time
# load pre-processed data
stance_index = np.load('data/stance_index.npy')
word2vec_cosine_similarity = np.load( 'data/word2vec_cosine_similarity.npy' )
tf_idf_similarity = np.load('data/tf_idf_similarity.npy')
lm_kl_divergence = np.load('data/lm_kl_divergence.npy')
# calculate the sigmoid function
def sigmoid(x):
return 1.0 / (1 + exp(-x))
# store the weights for each classifier
weights = {
"unrelated" : [],
"agree" : [],
"disagree" : [],
"discuss" : []
}
#%%
def load_data():
train_x = []
train_y = []
for index in range(49972):
stance = stance_index.item().get(index+1)
x1 = word2vec_cosine_similarity.item().get(index+1)
x2 = tf_idf_similarity.item().get(index+1)
train_x.append([1.0, x1, x2])
if stance == 'unrelated':
train_y.append(float(0))
else:
train_y.append(float(1))
return mat(train_x), mat(train_y).transpose()
#%%
# train a logistic regression model using gradient descent
def train_logistic_classifier(train_x, train_y,class_name):
# calculate training time
startTime = time.time()
sample_size, feature_size = shape(train_x)
# hyperparameters setting
alpha = 0.01
max_iteration = 100
weights[class_name] = ones((feature_size, 1))
''' standard gradient descent
for k in range(max_iteration):
for i in range(sample_size):
output = sigmoid(train_x[i, :] * weights[class_name])
error = train_y[i, 0] - output
weights[class_name] = weights[class_name] + alpha * train_x[i, :].transpose() * error
'''
#improved gradient descent (smooth stochastic gradient descent)
for k in range(max_iteration):
# randomly select samples to optimize for reducing cycle fluctuations
dataIndex = list(range(sample_size))
for i in list(range(sample_size)):
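            # step size decays over the iterations but keeps a small constant floor so it never reaches 0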
alpha = 4.0 / (1.0 + k + i) + 0.001
randIndex = int(random.uniform(0, len(dataIndex)))
output = sigmoid(train_x[randIndex, :] * weights[class_name])
error = train_y[randIndex, 0] - output
weights[class_name] = weights[class_name] + alpha * train_x[randIndex, :].transpose() * error
            del(dataIndex[randIndex]) # during one iteration, delete the optimized sample
print ('Training took %fs!' % (time.time() - startTime))
return weights[class_name]
#%%
# test trained logistic classifier with given test set
# store index of relevant articles for second classifier
# store index of unrelated articles for calculation of overall accuracy
relevant_articles_index = []
unrelated_articles_index = []
def test_first_classifier(weights, test_x, test_y):
sample_size, feature_size = shape(test_x)
match_num = 0
for i in range(sample_size):
predict = sigmoid(test_x[i, :] * weights)[0, 0] > 0.5
if predict:
relevant_articles_index.append(i+1)
else:
unrelated_articles_index.append(i+1)
if predict == bool(test_y[i, 0]):
match_num += 1
accuracy = float(match_num) / sample_size
return accuracy
def load_relevant(x,stance_name):
train_x = []
train_y = []
for index in range(len(x)):
stance = stance_index.item().get(x[index])
x1 = word2vec_cosine_similarity.item().get(x[index])
x2 = tf_idf_similarity.item().get(x[index])
train_x.append([1.0, x1, x2])
if stance == stance_name:
train_y.append(float(1))
else:
train_y.append(float(0))
return mat(train_x), mat(train_y).transpose()
# train multi-class logistic classifier (after relevant-or-not classifier)
def train_one_vs_all():
#agree or not
train_x, train_y = load_relevant(relevant_articles_index,'agree')
test_x = train_x; test_y = train_y
weights['agree'] = train_logistic_classifier(train_x, train_y,'agree')
accuracy = test_logistic_classifier(weights['agree'], test_x, test_y)
print ('The accuracy for the agree-or-not classifier is: %.3f%%' % (accuracy * 100))
display_logistic_classifier(weights['agree'], train_x, train_y)
#disagree or not
train_x, train_y = load_relevant(relevant_articles_index,'disagree')
test_x = train_x; test_y = train_y
weights['disagree'] = train_logistic_classifier(train_x, train_y,'disagree')
accuracy = test_logistic_classifier(weights['disagree'], test_x, test_y)
print ('The accuracy for the disagree-or-not classifier is: %.3f%%' % (accuracy * 100))
display_logistic_classifier(weights['disagree'], train_x, train_y)
#discuss or not
train_x, train_y = load_relevant(relevant_articles_index,'discuss')
test_x = train_x; test_y = train_y
weights['discuss'] = train_logistic_classifier(train_x, train_y,'discuss')
accuracy = test_logistic_classifier(weights['discuss'], test_x, test_y)
print ('The accuracy for the discuss-or-not classifier is: %.3f%%' % (accuracy * 100))
display_logistic_classifier(weights['discuss'], train_x, train_y)
# predict with multi-class classifier
def one_vs_all_output(x,weights):
prediction = [sigmoid(x * weights['agree'])[0, 0],sigmoid(x * weights['disagree'])[0, 0],sigmoid(x * weights['discuss'])[0, 0]]
if prediction.index(np.amax(prediction)) == 0:
output = 'agree'
elif prediction.index(np.amax(prediction)) == 1:
output = 'disagree'
elif prediction.index(np.amax(prediction)) == 2:
output = 'discuss'
return output
def test_logistic_classifier(weights, test_x, test_y):
sample_size, feature_size = shape(test_x)
match_num = 0
for i in range(sample_size):
predict = sigmoid(test_x[i, :] * weights)[0, 0] > 0.5
if predict == bool(test_y[i, 0]):
match_num += 1
accuracy = float(match_num) / sample_size
return accuracy
def prepare_test_data(article_id):
test_x = []
test_y = []
for index in range(len(article_id)):
stance = stance_index.item().get(article_id[index])
x1 = word2vec_cosine_similarity.item().get(article_id[index])
x2 = tf_idf_similarity.item().get(article_id[index])
x_vec = [1.0,x1,x2]
test_x.append(x_vec)
if stance == 'unrelated':
test_y.append(float(0))
else:
test_y.append(float(1))
return mat(test_x), mat(test_y).transpose()
# test one-vs-all classifier
def test_one_vs_all(weights, x):
match_num = 0
#load data & compare predicted result with the true one
for index in range(len(x)):
stance = stance_index.item().get(x[index])
x1 = word2vec_cosine_similarity.item().get(x[index])
x2 = tf_idf_similarity.item().get(x[index])
x_vec = [1.0,x1,x2]
if one_vs_all_output(x_vec,weights) == stance:
match_num+=1
accuracy = float(match_num) / len(x)
return accuracy
# 2-D plot of trained logistic regression model
def display_logistic_classifier(weights, train_x, train_y):
# notice: train_x and train_y is mat datatype
sample_size, feature_size = shape(train_x)
# draw all samples
for i in range(sample_size):
if int(train_y[i, 0]) == 0:
plt.plot(train_x[i, 1], train_x[i, 2], 'ob')
elif int(train_y[i, 0]) == 1:
plt.plot(train_x[i, 1], train_x[i, 2], 'or')
# draw the classify line
min_x = min(train_x[:, 1])[0, 0]
max_x = max(train_x[:, 1])[0, 0]
weights = weights.getA() # convert mat to array
y_min_x = float(-weights[0] - weights[1] * min_x) / weights[2]
y_max_x = float(-weights[0] - weights[1] * max_x) / weights[2]
plt.plot([min_x, max_x], [y_min_x, y_max_x], '-g')
plt.xlabel('X1'); plt.ylabel('X2')
plt.show()
#%%
# Implementation with the pre-processed data
# Classifier 1
#step 1: load data
print ("step 1: load data...")
train_x, train_y = load_data()
test_x = train_x; test_y = train_y
# step 2: training first classifier (relevant or not)...
print ("step 2: training first classifier...")
weights['unrelated'] = train_logistic_classifier(train_x, train_y,'unrelated')
# step 3: first classifier test
print ("step 3: testing first classifier...")
accuracy = test_first_classifier(weights['unrelated'], test_x, test_y)
# step 4: first classifier result display
print ("step 4: show the result...")
print ('The accuracy for the unrelated-or-not classifier is: %.3f%%' % (accuracy * 100))
display_logistic_classifier(weights['unrelated'], train_x, train_y)
# Classifier 2
#step 5: training 'agree', 'disagree', 'discuss' classifier...
print ("step 5: training second classifier...")
train_one_vs_all()
#%%
#accuracy testing
test_first_classifier(weights['unrelated'], test_x, test_y)
testx1 = prepare_test_data(unrelated_articles_index)[0]
testy1 = prepare_test_data(unrelated_articles_index)[1]
accuracy1 = test_logistic_classifier(weights['unrelated'],testx1, testy1)
print('first classifier accuracy:')
print(accuracy1)
accuracy2 = test_one_vs_all(weights, relevant_articles_index)
print('second classifier accuracy:')
print(accuracy2)
overall_accuracy = (len(unrelated_articles_index)*accuracy1 + len(relevant_articles_index)*accuracy2)/(len(unrelated_articles_index)+len(relevant_articles_index))
print('overall accuracy:')
print(overall_accuracy) | [
"numpy.amax",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.load",
"time.time",
"matplotlib.pyplot.show"
] | [((226, 258), 'numpy.load', 'np.load', (['"""data/stance_index.npy"""'], {}), "('data/stance_index.npy')\n", (233, 258), True, 'import numpy as np\n'), ((289, 335), 'numpy.load', 'np.load', (['"""data/word2vec_cosine_similarity.npy"""'], {}), "('data/word2vec_cosine_similarity.npy')\n", (296, 335), True, 'import numpy as np\n'), ((359, 396), 'numpy.load', 'np.load', (['"""data/tf_idf_similarity.npy"""'], {}), "('data/tf_idf_similarity.npy')\n", (366, 396), True, 'import numpy as np\n'), ((417, 453), 'numpy.load', 'np.load', (['"""data/lm_kl_divergence.npy"""'], {}), "('data/lm_kl_divergence.npy')\n", (424, 453), True, 'import numpy as np\n'), ((1354, 1365), 'time.time', 'time.time', ([], {}), '()\n', (1363, 1365), False, 'import time\n'), ((8261, 8311), 'matplotlib.pyplot.plot', 'plt.plot', (['[min_x, max_x]', '[y_min_x, y_max_x]', '"""-g"""'], {}), "([min_x, max_x], [y_min_x, y_max_x], '-g')\n", (8269, 8311), True, 'import matplotlib.pyplot as plt\n'), ((8319, 8335), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X1"""'], {}), "('X1')\n", (8329, 8335), True, 'import matplotlib.pyplot as plt\n'), ((8337, 8353), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""X2"""'], {}), "('X2')\n", (8347, 8353), True, 'import matplotlib.pyplot as plt\n'), ((8361, 8371), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8369, 8371), True, 'import matplotlib.pyplot as plt\n'), ((5747, 5766), 'numpy.amax', 'np.amax', (['prediction'], {}), '(prediction)\n', (5754, 5766), True, 'import numpy as np\n'), ((7796, 7840), 'matplotlib.pyplot.plot', 'plt.plot', (['train_x[i, 1]', 'train_x[i, 2]', '"""ob"""'], {}), "(train_x[i, 1], train_x[i, 2], 'ob')\n", (7804, 7840), True, 'import matplotlib.pyplot as plt\n'), ((2670, 2681), 'time.time', 'time.time', ([], {}), '()\n', (2679, 2681), False, 'import time\n'), ((5827, 5846), 'numpy.amax', 'np.amax', (['prediction'], {}), '(prediction)\n', (5834, 5846), True, 'import numpy as np\n'), ((7897, 7941), 'matplotlib.pyplot.plot', 'plt.plot', (['train_x[i, 1]', 'train_x[i, 2]', '"""or"""'], {}), "(train_x[i, 1], train_x[i, 2], 'or')\n", (7905, 7941), True, 'import matplotlib.pyplot as plt\n'), ((5910, 5929), 'numpy.amax', 'np.amax', (['prediction'], {}), '(prediction)\n', (5917, 5929), True, 'import numpy as np\n')] |
from Redy.Typing import *
import unittest
import pytest
class Test_Redy_Tools_PathLib(unittest.TestCase):
@pytest.fixture(autouse=True)
def test_2309557027928(self):
from Redy.Tools import Path
p = Path('.')
p.abs()
p.is_dir()
p.list_dir()
p.parent()
p.__iter__()
new = p.into('justfortest')
new.mkdir()
new.mkdir()
assert "justfortest" in p
print(new._path)
new.delete()
p.relative()
tuple(p.collect(lambda _: _.endswith('.py')))
new.mkdir()
new.into('some').mkdir().into("somefile").open('w').close()
new.delete()
assert new == str(new)
root, *_ = new
print(f'0-th elem of arr{new._path}: ', new[0])
print(f'the elem where endswith .py of arr{new._path}', new[lambda _: _.endswith('.py')]) | [
"pytest.fixture",
"Redy.Tools.Path"
] | [((112, 140), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (126, 140), False, 'import pytest\n'), ((223, 232), 'Redy.Tools.Path', 'Path', (['"""."""'], {}), "('.')\n", (227, 232), False, 'from Redy.Tools import Path\n')] |
import os
import warnings
from pathlib import Path
from functools import partial
from torch.utils.data import IterableDataset
import webdataset as wds
import utils.logging as logging
from data.contrast import VideoDecoder
from data.build import DATASET_REGISTRY
from utils import distributed as du
logger = logging.get_logger(__name__)
class SplitByNode:
"""Selects a subset of urls based on Torch get_rank/get_world_size.
Used as a shard selection function in Dataset."""
def __init__(self, group=None):
self.rank = -1
self.size = -1
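        # keep the defaults (no shard splitting) if torch.distributed is unavailable or uninitialized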
try:
import torch
if not torch.distributed.is_available() or not torch.distributed.is_initialized():
return
except Exception as e:
print(e)
return
if group is None:
# group = torch.distributed.group.WORLD
try:
# some versions of torch don't like group=None
import torch.distributed.distributed_c10d
group = torch.distributed.distributed_c10d._default_pg
except:
pass
self.rank = torch.distributed.get_rank(group=group)
self.size = torch.distributed.get_world_size(group=group)
def __call__(self, urls):
urls = [url for url in urls]
assert isinstance(urls, list)
if self.size > 1:
if self.rank == 0 and len(urls) < self.size:
warnings.warn(f"world_size {self.size} > num_shards {len(urls)}")
return urls[self.rank::self.size]
else:
return urls
def split_by_worker(urls):
"""Selects a subset of urls based on Torch get_worker_info.
Used as a shard selection function in Dataset."""
import torch
urls = [url for url in urls]
assert isinstance(urls, list)
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
wid = worker_info.id
num_workers = worker_info.num_workers
if wid == 0 and len(urls) < num_workers:
warnings.warn(f"num_workers {num_workers} > num_shards {len(urls)}")
return urls[wid::num_workers]
else:
return urls
def shard_selection(urls, nodesplitter, splitter):
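    # Split shards across nodes first, then across dataloader workers within each node.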
return splitter(nodesplitter(urls))
@DATASET_REGISTRY.register()
def ACAV(cfg, mode):
"""
ACAV video loader with VideoDecoder. Videos are stored in POSIX tar
archives and we process them using WebDataset.
Args:
cfg (CfgNode): configs.
mode (string): Options include `pretrain` mode.
"""
assert mode in [
"pretrain"
], "Split '{}' not supported for ACAV".format(mode)
shards_path = sorted(Path(cfg.DATASET_DIR).glob("*.tar"))
assert cfg.DATA_LOADER.NUM_WORKERS <= len(shards_path)
s_idx = int(shards_path[0].stem.split("-")[1])
e_idx = int(shards_path[-1].stem.split("-")[1])
url = os.path.join(
f"{cfg.DATASET_DIR}",
f"shard-{{{s_idx:06d}..{e_idx:06d}}}.tar"
)
videodecoder = VideoDecoder(cfg)
batch_size = int(cfg.PRETRAIN.BATCH_SIZE / du.get_world_size())
if cfg.DATA_LOADER.NUM_WORKERS > 0:
length = int(cfg.PRETRAIN.DATASET_SIZE / (cfg.DATA_LOADER.NUM_WORKERS * du.get_world_size()))
nominal = int(length / batch_size) * cfg.DATA_LOADER.NUM_WORKERS * batch_size
else:
nominal = int(cfg.PRETRAIN.DATASET_SIZE / du.get_world_size())
length = nominal
nodesplitter = SplitByNode()
_shard_selection = partial(shard_selection, nodesplitter=nodesplitter, splitter=split_by_worker)
dataset = wds.Dataset(url, handler=wds.warn_and_continue, shard_selection=_shard_selection).shuffle(1000).map(videodecoder.decode, handler=wds.warn_and_continue)
dataset = wds.ResizedDataset(dataset, length, nominal)
return dataset
| [
"torch.distributed.is_available",
"data.build.DATASET_REGISTRY.register",
"webdataset.Dataset",
"data.contrast.VideoDecoder",
"webdataset.ResizedDataset",
"utils.distributed.get_world_size",
"pathlib.Path",
"torch.utils.data.get_worker_info",
"os.path.join",
"torch.distributed.is_initialized",
"functools.partial",
"torch.distributed.get_rank",
"utils.logging.get_logger",
"torch.distributed.get_world_size"
] | [((312, 340), 'utils.logging.get_logger', 'logging.get_logger', (['__name__'], {}), '(__name__)\n', (330, 340), True, 'import utils.logging as logging\n'), ((2298, 2325), 'data.build.DATASET_REGISTRY.register', 'DATASET_REGISTRY.register', ([], {}), '()\n', (2323, 2325), False, 'from data.build import DATASET_REGISTRY\n'), ((1861, 1895), 'torch.utils.data.get_worker_info', 'torch.utils.data.get_worker_info', ([], {}), '()\n', (1893, 1895), False, 'import torch\n'), ((2916, 2993), 'os.path.join', 'os.path.join', (['f"""{cfg.DATASET_DIR}"""', 'f"""shard-{{{s_idx:06d}..{e_idx:06d}}}.tar"""'], {}), "(f'{cfg.DATASET_DIR}', f'shard-{{{s_idx:06d}..{e_idx:06d}}}.tar')\n", (2928, 2993), False, 'import os\n'), ((3036, 3053), 'data.contrast.VideoDecoder', 'VideoDecoder', (['cfg'], {}), '(cfg)\n', (3048, 3053), False, 'from data.contrast import VideoDecoder\n'), ((3512, 3589), 'functools.partial', 'partial', (['shard_selection'], {'nodesplitter': 'nodesplitter', 'splitter': 'split_by_worker'}), '(shard_selection, nodesplitter=nodesplitter, splitter=split_by_worker)\n', (3519, 3589), False, 'from functools import partial\n'), ((3770, 3814), 'webdataset.ResizedDataset', 'wds.ResizedDataset', (['dataset', 'length', 'nominal'], {}), '(dataset, length, nominal)\n', (3788, 3814), True, 'import webdataset as wds\n'), ((1147, 1186), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {'group': 'group'}), '(group=group)\n', (1173, 1186), False, 'import torch\n'), ((1207, 1252), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {'group': 'group'}), '(group=group)\n', (1239, 1252), False, 'import torch\n'), ((3101, 3120), 'utils.distributed.get_world_size', 'du.get_world_size', ([], {}), '()\n', (3118, 3120), True, 'from utils import distributed as du\n'), ((2706, 2727), 'pathlib.Path', 'Path', (['cfg.DATASET_DIR'], {}), '(cfg.DATASET_DIR)\n', (2710, 2727), False, 'from pathlib import Path\n'), ((3410, 3429), 'utils.distributed.get_world_size', 'du.get_world_size', ([], {}), '()\n', (3427, 3429), True, 'from utils import distributed as du\n'), ((629, 661), 'torch.distributed.is_available', 'torch.distributed.is_available', ([], {}), '()\n', (659, 661), False, 'import torch\n'), ((669, 703), 'torch.distributed.is_initialized', 'torch.distributed.is_initialized', ([], {}), '()\n', (701, 703), False, 'import torch\n'), ((3242, 3261), 'utils.distributed.get_world_size', 'du.get_world_size', ([], {}), '()\n', (3259, 3261), True, 'from utils import distributed as du\n'), ((3604, 3690), 'webdataset.Dataset', 'wds.Dataset', (['url'], {'handler': 'wds.warn_and_continue', 'shard_selection': '_shard_selection'}), '(url, handler=wds.warn_and_continue, shard_selection=\n _shard_selection)\n', (3615, 3690), True, 'import webdataset as wds\n')] |
#!/usr/bin/env python3
from argparse import ArgumentParser
import faiss
import numpy as np
from chg.defaults import CHG_PROJ_FAISS
from chg.db.database import get_store
from chg.embed.basic import (
BasicEmbedder,
normalize_vectors,
)
def load_vectors():
store = get_store()
rows = store.run_query(
"SELECT code_embedding FROM Embeddings ORDER BY chunk_id"
)
code_embeddings = [store.blob_to_array(row[0]) for row in rows]
mat = np.array(code_embeddings, dtype=np.float32)
return mat
def build_index(mat):
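    # Vectors are L2-normalized so the inner-product index (IndexFlatIP) effectively ranks by cosine similarity.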
mat = normalize_vectors(mat).astype(np.float32)
index = faiss.IndexFlatIP(mat.shape[-1])
index.add(mat)
return index
def embed_query(model, query):
return model.embed_nl(query)
def load_index():
return faiss.read_index(CHG_PROJ_FAISS)
def run_query(index, embedding, k):
# make sure row vector
embedding = embedding.reshape(1, -1)
embedding = normalize_vectors(embedding).astype(np.float32)
D, ix = index.search(embedding, k)
return ix.flatten()
def lookup_in_store(store, ixs):
# ixs are offset by 1 as ids in the database
ids = ixs + 1
return store.get_dialogue_by_ids(ids)
class EmbeddedSearcher(object):
def __init__(self):
self.embed_model = BasicEmbedder()
self.store = get_store()
self.faiss_index = load_index()
def search(self, query, k=5):
vector = embed_query(self.embed_model, query)
assert k > 0
ixs = run_query(self.faiss_index, vector, k)
return lookup_in_store(self.store, ixs)
def build(args):
assert args.action == "build"
mat = load_vectors()
index = build_index(mat)
faiss.write_index(index, CHG_PROJ_FAISS)
def query_from_cli(args):
assert args.action == "query"
searcher = EmbeddedSearcher()
return searcher.search(args.query, k=args.k)
def get_args():
parser = ArgumentParser(
description="Semantic search based on embedded queries"
)
subparsers = parser.add_subparsers(help="Semantic search actions")
build_parser = subparsers.add_parser("build")
build_parser.set_defaults(action="build")
query_parser = subparsers.add_parser("query")
query_parser.set_defaults(action="query")
query_parser.add_argument(
"--query",
type=str,
help="Query to search with",
)
query_parser.add_argument(
"--k",
type=int,
help="Number of records to return for query",
default=5,
)
parser.set_defaults(action="build")
return parser.parse_args()
def main():
args = get_args()
if args.action == "build":
build(args)
elif args.action == "query":
query_from_cli(args)
else:
raise Exception("Unknown action:", args.action)
if __name__ == "__main__":
try:
main()
except Exception as err:
import pdb
pdb.post_mortem()
| [
"faiss.write_index",
"argparse.ArgumentParser",
"pdb.post_mortem",
"faiss.read_index",
"numpy.array",
"faiss.IndexFlatIP",
"chg.embed.basic.normalize_vectors",
"chg.db.database.get_store",
"chg.embed.basic.BasicEmbedder"
] | [((279, 290), 'chg.db.database.get_store', 'get_store', ([], {}), '()\n', (288, 290), False, 'from chg.db.database import get_store\n'), ((469, 512), 'numpy.array', 'np.array', (['code_embeddings'], {'dtype': 'np.float32'}), '(code_embeddings, dtype=np.float32)\n', (477, 512), True, 'import numpy as np\n'), ((616, 648), 'faiss.IndexFlatIP', 'faiss.IndexFlatIP', (['mat.shape[-1]'], {}), '(mat.shape[-1])\n', (633, 648), False, 'import faiss\n'), ((782, 814), 'faiss.read_index', 'faiss.read_index', (['CHG_PROJ_FAISS'], {}), '(CHG_PROJ_FAISS)\n', (798, 814), False, 'import faiss\n'), ((1688, 1728), 'faiss.write_index', 'faiss.write_index', (['index', 'CHG_PROJ_FAISS'], {}), '(index, CHG_PROJ_FAISS)\n', (1705, 1728), False, 'import faiss\n'), ((1905, 1976), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Semantic search based on embedded queries"""'}), "(description='Semantic search based on embedded queries')\n", (1919, 1976), False, 'from argparse import ArgumentParser\n'), ((1277, 1292), 'chg.embed.basic.BasicEmbedder', 'BasicEmbedder', ([], {}), '()\n', (1290, 1292), False, 'from chg.embed.basic import BasicEmbedder, normalize_vectors\n'), ((1314, 1325), 'chg.db.database.get_store', 'get_store', ([], {}), '()\n', (1323, 1325), False, 'from chg.db.database import get_store\n'), ((562, 584), 'chg.embed.basic.normalize_vectors', 'normalize_vectors', (['mat'], {}), '(mat)\n', (579, 584), False, 'from chg.embed.basic import BasicEmbedder, normalize_vectors\n'), ((937, 965), 'chg.embed.basic.normalize_vectors', 'normalize_vectors', (['embedding'], {}), '(embedding)\n', (954, 965), False, 'from chg.embed.basic import BasicEmbedder, normalize_vectors\n'), ((2905, 2922), 'pdb.post_mortem', 'pdb.post_mortem', ([], {}), '()\n', (2920, 2922), False, 'import pdb\n')] |
#!/usr/bin/env python
import os, sys
import pickle
import matchzoo as mz
from matchzoo.data_pack import pack
import time
import numpy as np
np.random.seed(int(time.time()))
from matchzoo.models.dssm import DSSM
from shutil import rmtree
sys.path.append('.')
from scripts.data_convert.matchzoo_reader \
import readWhiteSpacedMatchZooData, WhiteSpacePreprocessor
colName = sys.argv[1]
modelFile = sys.argv[2]
epochQty = int(sys.argv[3])
dataTranFile = os.path.join('matchZooTrain', colName, 'data_transform.bin')
dataFileTrain = os.path.join('matchZooTrain', colName, 'tran_neg10.tsv')
dataFileTest = os.path.join('matchZooTrain', colName, 'dev1_allCand.tsv')
print(f'Collection: {colName} # of epochs: {epochQty} model file: {modelFile} data transform file: {dataTranFile}')
# Note dtype! don't let Pandas guess column data types!
dataTrainPacked = pack(readWhiteSpacedMatchZooData(dataFileTrain))
dataTestPacked = pack(readWhiteSpacedMatchZooData(dataFileTest))
# prep = mz.preprocessors.BasicPreprocessor()
prep = WhiteSpacePreprocessor()
#import pdb, sys
# try:
if True:
if os.path.exists(dataTranFile):
print(f'Loading existing preprocessor from {dataTranFile}')
with open(dataTranFile, 'rb') as f:
prep = pickle.load(f)
else:
print(f'Fitting a new preprocessor')
# For some reason, we fit the preprocessor to packed data
prep.fit(dataTrainPacked)
print('Preprocessor context:')
print(prep.context)
with open(dataTranFile, 'wb') as of:
pickle.dump(prep, of)
print('Data transformer is fitted and saved!')
dataTrainProc = prep.transform(dataTrainPacked)
dataTestProc = prep.transform(dataTestPacked)
if os.path.exists(modelFile):
print('Loading the model from: ' + modelFile)
model = mz.load_model(modelFile)
model.backend.summary()
else:
print('Creating a model from scratch')
model = DSSM()
model.params.update(prep.context)
model.params['mlp_num_layers'] = 5
model.params['mlp_num_units'] = 5000
model.params['mlp_num_fan_out'] = 128
model.params['mlp_activation_func'] = 'relu'
model.guess_and_fill_missing_params(verbose=0)
print("Params completed", model.params.completed())
model.build()
model.compile()
model.backend.summary()
# This needs to use the processed data!
xTrain, yTrain = dataTrainProc.unpack()
model.fit(xTrain, yTrain, batch_size=128, epochs=epochQty)
if os.path.exists(modelFile):
rmtree(modelFile)
model.save(modelFile)
# except:
# tb is traceback
# type, value, tb = sys.exc_info()
# pdb.post_mortem(tb)
| [
"scripts.data_convert.matchzoo_reader.readWhiteSpacedMatchZooData",
"os.path.exists",
"pickle.dump",
"scripts.data_convert.matchzoo_reader.WhiteSpacePreprocessor",
"matchzoo.load_model",
"os.path.join",
"matchzoo.models.dssm.DSSM",
"pickle.load",
"shutil.rmtree",
"time.time",
"sys.path.append"
] | [((241, 261), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (256, 261), False, 'import os, sys\n'), ((461, 521), 'os.path.join', 'os.path.join', (['"""matchZooTrain"""', 'colName', '"""data_transform.bin"""'], {}), "('matchZooTrain', colName, 'data_transform.bin')\n", (473, 521), False, 'import os, sys\n'), ((538, 594), 'os.path.join', 'os.path.join', (['"""matchZooTrain"""', 'colName', '"""tran_neg10.tsv"""'], {}), "('matchZooTrain', colName, 'tran_neg10.tsv')\n", (550, 594), False, 'import os, sys\n'), ((610, 668), 'os.path.join', 'os.path.join', (['"""matchZooTrain"""', 'colName', '"""dev1_allCand.tsv"""'], {}), "('matchZooTrain', colName, 'dev1_allCand.tsv')\n", (622, 668), False, 'import os, sys\n'), ((1029, 1053), 'scripts.data_convert.matchzoo_reader.WhiteSpacePreprocessor', 'WhiteSpacePreprocessor', ([], {}), '()\n', (1051, 1053), False, 'from scripts.data_convert.matchzoo_reader import readWhiteSpacedMatchZooData, WhiteSpacePreprocessor\n'), ((866, 908), 'scripts.data_convert.matchzoo_reader.readWhiteSpacedMatchZooData', 'readWhiteSpacedMatchZooData', (['dataFileTrain'], {}), '(dataFileTrain)\n', (893, 908), False, 'from scripts.data_convert.matchzoo_reader import readWhiteSpacedMatchZooData, WhiteSpacePreprocessor\n'), ((932, 973), 'scripts.data_convert.matchzoo_reader.readWhiteSpacedMatchZooData', 'readWhiteSpacedMatchZooData', (['dataFileTest'], {}), '(dataFileTest)\n', (959, 973), False, 'from scripts.data_convert.matchzoo_reader import readWhiteSpacedMatchZooData, WhiteSpacePreprocessor\n'), ((1097, 1125), 'os.path.exists', 'os.path.exists', (['dataTranFile'], {}), '(dataTranFile)\n', (1111, 1125), False, 'import os, sys\n'), ((1724, 1749), 'os.path.exists', 'os.path.exists', (['modelFile'], {}), '(modelFile)\n', (1738, 1749), False, 'import os, sys\n'), ((2542, 2567), 'os.path.exists', 'os.path.exists', (['modelFile'], {}), '(modelFile)\n', (2556, 2567), False, 'import os, sys\n'), ((161, 172), 'time.time', 'time.time', ([], {}), '()\n', (170, 172), False, 'import time\n'), ((1539, 1560), 'pickle.dump', 'pickle.dump', (['prep', 'of'], {}), '(prep, of)\n', (1550, 1560), False, 'import pickle\n'), ((1821, 1845), 'matchzoo.load_model', 'mz.load_model', (['modelFile'], {}), '(modelFile)\n', (1834, 1845), True, 'import matchzoo as mz\n'), ((1952, 1958), 'matchzoo.models.dssm.DSSM', 'DSSM', ([], {}), '()\n', (1956, 1958), False, 'from matchzoo.models.dssm import DSSM\n'), ((2577, 2594), 'shutil.rmtree', 'rmtree', (['modelFile'], {}), '(modelFile)\n', (2583, 2594), False, 'from shutil import rmtree\n'), ((1258, 1272), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1269, 1272), False, 'import pickle\n')] |
import fileinput
import numpy as np
def parse_line(line):
line = line.rstrip()
components = line.split(" -> ")
origin = tuple(int(x) for x in components[0].split(","))
destination = tuple(int(x) for x in components[1].split(","))
return origin, destination
def is_horizontal_line(origin, destination):
return origin[0] == destination[0]
def is_vertical_line(origin, destination):
return origin[1] == destination[1]
def is_diagonal_line(origin, destination):
delta_x = abs(origin[0] - destination[0])
delta_y = abs(origin[1] - destination[1])
return delta_x == delta_y
def is_straight_line(origin, destination):
return is_horizontal_line(origin, destination) or is_vertical_line(origin, destination)
def is_straight_or_diagonal_line(origin, destination):
return is_straight_line(origin, destination) or is_diagonal_line(origin, destination)
def import_hydrothermal_lines(path, filter_fun=is_straight_line):
hydrothermal_lines = []
for line in fileinput.input(path):
origin, destination = parse_line(line)
if filter_fun(origin, destination):
hydrothermal_lines.append([origin, destination])
return hydrothermal_lines
def get_x_size(hydrothermal_lines):
return max([max(origin[0],destination[0]) for origin, destination in hydrothermal_lines]) + 1
def get_y_size(hydrothermal_lines):
return max([max(origin[1],destination[1]) for origin, destination in hydrothermal_lines]) + 1
def make_empty_hydrothermal_map(hydrothermal_lines):
x_size = get_x_size(hydrothermal_lines)
y_size = get_y_size(hydrothermal_lines)
return np.zeros(shape=(x_size, y_size))
def _get_min_y_max_y(origin, destination):
min_y = min(origin[1], destination[1])
max_y = max(origin[1], destination[1]) +1
return min_y, max_y
def add_horizontal_line(hydrothermal_map, origin, destination):
min_y, max_y = _get_min_y_max_y(origin, destination)
x = origin[0]
hydrothermal_map[x,min_y:max_y] += 1
def _get_min_x_max_x(origin, destination):
min_x = min(origin[0], destination[0])
max_x = max(origin[0], destination[0]) +1
return min_x, max_x
def add_vertical_line(hydrothermal_map, origin, destination):
min_x, max_x = _get_min_x_max_x(origin, destination)
y = origin[1]
hydrothermal_map[min_x:max_x, y] += 1
def add_diagonal_line(hydrothermal_map, origin, destination):
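    # Walk the 45-degree line one cell at a time, using np.sign to choose the step direction on each axis.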
delta_x = destination[0] - origin[0]
delta_y = destination[1] - origin[1]
for offset in range(abs(delta_x)+1):
x = origin[0] + np.sign(delta_x)*offset
y = origin[1] + np.sign(delta_y)*offset
hydrothermal_map[x,y] += 1
def add_line(hydrothermal_map, origin, destination):
if is_horizontal_line(origin, destination):
add_horizontal_line(hydrothermal_map, origin, destination)
elif is_vertical_line(origin, destination):
add_vertical_line(hydrothermal_map, origin, destination)
elif is_diagonal_line(origin, destination):
add_diagonal_line(hydrothermal_map, origin, destination)
else:
raise ValueError(f"{origin} -> {destination} is not a straight line")
def populate_hydrothermal_map(hydrothermal_map, hydrothermal_lines):
for origin, destination in hydrothermal_lines:
add_line(hydrothermal_map, origin, destination)
| [
"numpy.sign",
"numpy.zeros",
"fileinput.input"
] | [((1005, 1026), 'fileinput.input', 'fileinput.input', (['path'], {}), '(path)\n', (1020, 1026), False, 'import fileinput\n'), ((1633, 1665), 'numpy.zeros', 'np.zeros', ([], {'shape': '(x_size, y_size)'}), '(shape=(x_size, y_size))\n', (1641, 1665), True, 'import numpy as np\n'), ((2556, 2572), 'numpy.sign', 'np.sign', (['delta_x'], {}), '(delta_x)\n', (2563, 2572), True, 'import numpy as np\n'), ((2604, 2620), 'numpy.sign', 'np.sign', (['delta_y'], {}), '(delta_y)\n', (2611, 2620), True, 'import numpy as np\n')] |
import unittest
from imageio import imread
import imagecaptioning
from imagecaptioning.definitions import ROOT_DIR
class TestGeneratingCaptions(unittest.TestCase):
def test_captions(self):
word_map_file = ROOT_DIR/'data/processed'/'WORDMAP_coco_5_cap_per_img_5_min_word_freq.json'
checkpoint = ROOT_DIR/'models/best_checkpoint_only_state_dict.pth.tar'
model = imagecaptioning.ImageCaptioner(word_map_file=word_map_file,
checkpoint=checkpoint)
# load image
img_path = ROOT_DIR/'imgs/football.jpg'
img = imread(img_path)
sent = model.gen_caption(img)
print(sent)
# extract encoder and decoder
self.encoder = model.encoder
self.decoder = model.decoder
def test_caption_with_encoder_decoder(self):
word_map_file = ROOT_DIR/'data/processed'/'WORDMAP_coco_5_cap_per_img_5_min_word_freq.json'
checkpoint = ROOT_DIR/'models/best_checkpoint_only_state_dict.pth.tar'
model = imagecaptioning.ImageCaptioner(word_map_file=word_map_file,
checkpoint=checkpoint)
# init new model from encoder and decoder
model_from_encoder_decoder = imagecaptioning.ImageCaptioner(
word_map_file=word_map_file,
encoder=model.encoder,
decoder=model.decoder)
# load_img
img_path = ROOT_DIR/'imgs/football.jpg'
img = imread(img_path)
sent = model_from_encoder_decoder.gen_caption(img)
print(sent)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"imageio.imread",
"imagecaptioning.ImageCaptioner"
] | [((1718, 1733), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1731, 1733), False, 'import unittest\n'), ((391, 478), 'imagecaptioning.ImageCaptioner', 'imagecaptioning.ImageCaptioner', ([], {'word_map_file': 'word_map_file', 'checkpoint': 'checkpoint'}), '(word_map_file=word_map_file, checkpoint=\n checkpoint)\n', (421, 478), False, 'import imagecaptioning\n'), ((605, 621), 'imageio.imread', 'imread', (['img_path'], {}), '(img_path)\n', (611, 621), False, 'from imageio import imread\n'), ((1039, 1126), 'imagecaptioning.ImageCaptioner', 'imagecaptioning.ImageCaptioner', ([], {'word_map_file': 'word_map_file', 'checkpoint': 'checkpoint'}), '(word_map_file=word_map_file, checkpoint=\n checkpoint)\n', (1069, 1126), False, 'import imagecaptioning\n'), ((1257, 1367), 'imagecaptioning.ImageCaptioner', 'imagecaptioning.ImageCaptioner', ([], {'word_map_file': 'word_map_file', 'encoder': 'model.encoder', 'decoder': 'model.decoder'}), '(word_map_file=word_map_file, encoder=model.\n encoder, decoder=model.decoder)\n', (1287, 1367), False, 'import imagecaptioning\n'), ((1590, 1606), 'imageio.imread', 'imread', (['img_path'], {}), '(img_path)\n', (1596, 1606), False, 'from imageio import imread\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2022 F4PGA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
from pathlib import Path
import os
from shutil import move as sh_mv
from re import match as re_match
from f4pga.common import *
from f4pga.module import Module, ModuleContext
def default_output_name(place_constraints):
p = place_constraints
m = re_match('(.*)\\.[^.]*$', place_constraints)
if m:
return m.groups()[0] + '.place'
return f'{p}.place'
def place_constraints_file(ctx: ModuleContext):
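    # Returns (path, is_dummy); is_dummy is True when no constraints were provided and an empty placeholder file must be created.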
p = ctx.takes.place_constraints
if p:
return p, False
p = ctx.takes.io_place
if p:
return p, False
return f'{Path(ctx.takes.eblif).stem}.place', True
class PlaceModule(Module):
def map_io(self, ctx: ModuleContext):
mapping = {}
p, _ = place_constraints_file(ctx)
mapping['place'] = default_output_name(p)
return mapping
def execute(self, ctx: ModuleContext):
place_constraints, dummy = place_constraints_file(ctx)
place_constraints = os.path.realpath(place_constraints)
if dummy:
with open(place_constraints, 'wb') as f:
f.write(b'')
build_dir = str(Path(ctx.takes.eblif).parent)
vpr_options = ['--fix_clusters', place_constraints]
yield 'Running VPR...'
vprargs = VprArgs(ctx.share, ctx.takes.eblif, ctx.values,
sdc_file=ctx.takes.sdc, vpr_extra_opts=vpr_options)
vpr('place', vprargs, cwd=build_dir)
        # VPR picks the output name on its own. If the user requested another name, the
# output file should be moved.
# TODO: This extends the set of names that would cause collisions.
        # As of now (22-07-2021), no collision detection is being done, but
# when the problem gets tackled, we should keep in mind that VPR-based
# modules may produce some temporary files with names that differ from
# the ones in flow configuration.
if ctx.is_output_explicit('place'):
output_file = default_output_name(place_constraints)
sh_mv(output_file, ctx.outputs.place)
yield 'Saving log...'
save_vpr_log('place.log', build_dir=build_dir)
def __init__(self, _):
self.name = 'place'
self.no_of_phases = 2
self.takes = [
'eblif',
'sdc?',
'place_constraints?',
'io_place?'
]
self.produces = [ 'place' ]
self.values = [
'device',
'vpr_options?'
] + vpr_specific_values()
ModuleClass = PlaceModule
| [
"os.path.realpath",
"re.match",
"pathlib.Path",
"shutil.move"
] | [((926, 970), 're.match', 're_match', (['"""(.*)\\\\.[^.]*$"""', 'place_constraints'], {}), "('(.*)\\\\.[^.]*$', place_constraints)\n", (934, 970), True, 'from re import match as re_match\n'), ((1625, 1660), 'os.path.realpath', 'os.path.realpath', (['place_constraints'], {}), '(place_constraints)\n', (1641, 1660), False, 'import os\n'), ((2686, 2723), 'shutil.move', 'sh_mv', (['output_file', 'ctx.outputs.place'], {}), '(output_file, ctx.outputs.place)\n', (2691, 2723), True, 'from shutil import move as sh_mv\n'), ((1786, 1807), 'pathlib.Path', 'Path', (['ctx.takes.eblif'], {}), '(ctx.takes.eblif)\n', (1790, 1807), False, 'from pathlib import Path\n'), ((1240, 1261), 'pathlib.Path', 'Path', (['ctx.takes.eblif'], {}), '(ctx.takes.eblif)\n', (1244, 1261), False, 'from pathlib import Path\n')] |
import GPy
import numpy as np
import pytest
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern
from bopy.benchmark_functions import forrester
from bopy.exceptions import NotFittedError
from bopy.surrogate import GPyGPSurrogate, ScipyGPSurrogate
n_samples = 10
@pytest.fixture(scope="module", autouse=True)
def x():
return np.linspace(0, 1, n_samples).reshape(-1, 1)
@pytest.fixture(scope="module", autouse=True)
def y(x):
return forrester(x)
def scipy_gp_surrogate():
return ScipyGPSurrogate(
gp=GaussianProcessRegressor(kernel=Matern(nu=1.5), alpha=1e-5, normalize_y=True)
)
def gpy_gp_surrogate():
def gp_initializer(x, y):
return GPy.models.GPRegression(
x, y, kernel=GPy.kern.RBF(input_dim=1), noise_var=1e-5, normalizer=True
)
return GPyGPSurrogate(gp_initializer=gp_initializer)
@pytest.fixture(
scope="module",
autouse=True,
params=[scipy_gp_surrogate(), gpy_gp_surrogate()],
ids=["scipy_gp", "gpy_gp"],
)
def surrogate(request):
return request.param
@pytest.fixture(scope="class")
def trained_surrogate(surrogate, x, y):
surrogate.fit(x, y)
return surrogate
class TestArgumentsToFit:
def test_x_must_contain_at_least_one_sample(self, surrogate):
with pytest.raises(ValueError, match="`x` must contain at least one sample"):
surrogate.fit(x=np.array([]), y=np.array([1.0]))
def test_y_must_contain_at_least_one_sample(self, surrogate):
with pytest.raises(ValueError, match="`y` must contain at least one sample"):
surrogate.fit(x=np.array([[1.0]]), y=np.array([]))
def test_x_and_y_must_contain_the_same_number_of_samples(self, surrogate):
with pytest.raises(
ValueError, match="`x` and `y` must have the same number of samples"
):
surrogate.fit(x=np.array([[1.0]]), y=np.array([1.0, 1.0]))
def test_x_must_be_2d(self, surrogate):
with pytest.raises(ValueError, match="`x` must be 2D"):
surrogate.fit(x=np.array([[[1.0]]]), y=np.array([1.0]))
def test_y_must_be_1d(self, surrogate):
with pytest.raises(ValueError, match="`y` must be 1D"):
surrogate.fit(x=np.array([[1.0]]), y=np.array([[1.0]]))
class TestBeforeFitting:
def test_calling_predict_raises_not_fitted_error(self, surrogate, x):
with pytest.raises(NotFittedError, match="must be fitted first"):
surrogate.predict(x)
class TestArgumentsToPredictAfterFitting:
def test_x_must_contain_at_least_one_sample(self, trained_surrogate):
with pytest.raises(ValueError, match="`x` must contain at least one sample"):
trained_surrogate.predict(x=np.array([]))
def test_x_must_be_2d(self, trained_surrogate):
with pytest.raises(ValueError, match="`x` must be 2D"):
trained_surrogate.predict(x=np.array([1.0]))
def test_x_must_have_the_same_number_of_dimensions_as_the_training_data(
self, trained_surrogate
):
with pytest.raises(
ValueError,
match="`x` must have the same number of dimensions as the training data",
):
trained_surrogate.predict(x=np.array([[1.0, 1.0]]))
class TestAfterPredicting:
@pytest.fixture(scope="class", autouse=True)
def predictions(self, trained_surrogate, x):
return trained_surrogate.predict(x)
@pytest.fixture(scope="class", autouse=True)
def predicted_mean(self, predictions):
return predictions[0]
@pytest.fixture(scope="class", autouse=True)
def predicted_var(self, predictions):
return predictions[1]
def test_predicted_mean_is_the_correct_shape(self, predicted_mean):
assert predicted_mean.shape == (n_samples,)
def test_predicted_var_is_the_correct_shape(self, predicted_var):
assert predicted_var.shape == (n_samples, n_samples)
def test_reference_to_x_is_stored(self, trained_surrogate, x):
assert np.array_equal(trained_surrogate.x, x)
def test_reference_to_y_is_stored(self, trained_surrogate, y):
assert np.array_equal(trained_surrogate.y, y)
| [
"GPy.kern.RBF",
"bopy.surrogate.GPyGPSurrogate",
"bopy.benchmark_functions.forrester",
"numpy.array",
"numpy.linspace",
"numpy.array_equal",
"pytest.raises",
"pytest.fixture",
"sklearn.gaussian_process.kernels.Matern"
] | [((328, 372), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'autouse': '(True)'}), "(scope='module', autouse=True)\n", (342, 372), False, 'import pytest\n'), ((440, 484), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'autouse': '(True)'}), "(scope='module', autouse=True)\n", (454, 484), False, 'import pytest\n'), ((1117, 1146), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (1131, 1146), False, 'import pytest\n'), ((506, 518), 'bopy.benchmark_functions.forrester', 'forrester', (['x'], {}), '(x)\n', (515, 518), False, 'from bopy.benchmark_functions import forrester\n'), ((873, 918), 'bopy.surrogate.GPyGPSurrogate', 'GPyGPSurrogate', ([], {'gp_initializer': 'gp_initializer'}), '(gp_initializer=gp_initializer)\n', (887, 918), False, 'from bopy.surrogate import GPyGPSurrogate, ScipyGPSurrogate\n'), ((3318, 3361), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""', 'autouse': '(True)'}), "(scope='class', autouse=True)\n", (3332, 3361), False, 'import pytest\n'), ((3461, 3504), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""', 'autouse': '(True)'}), "(scope='class', autouse=True)\n", (3475, 3504), False, 'import pytest\n'), ((3584, 3627), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""', 'autouse': '(True)'}), "(scope='class', autouse=True)\n", (3598, 3627), False, 'import pytest\n'), ((4040, 4078), 'numpy.array_equal', 'np.array_equal', (['trained_surrogate.x', 'x'], {}), '(trained_surrogate.x, x)\n', (4054, 4078), True, 'import numpy as np\n'), ((4162, 4200), 'numpy.array_equal', 'np.array_equal', (['trained_surrogate.y', 'y'], {}), '(trained_surrogate.y, y)\n', (4176, 4200), True, 'import numpy as np\n'), ((393, 421), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n_samples'], {}), '(0, 1, n_samples)\n', (404, 421), True, 'import numpy as np\n'), ((1339, 1410), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""`x` must contain at least one sample"""'}), "(ValueError, match='`x` must contain at least one sample')\n", (1352, 1410), False, 'import pytest\n'), ((1553, 1624), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""`y` must contain at least one sample"""'}), "(ValueError, match='`y` must contain at least one sample')\n", (1566, 1624), False, 'import pytest\n'), ((1782, 1870), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""`x` and `y` must have the same number of samples"""'}), "(ValueError, match=\n '`x` and `y` must have the same number of samples')\n", (1795, 1870), False, 'import pytest\n'), ((2018, 2067), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""`x` must be 2D"""'}), "(ValueError, match='`x` must be 2D')\n", (2031, 2067), False, 'import pytest\n'), ((2195, 2244), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""`y` must be 1D"""'}), "(ValueError, match='`y` must be 1D')\n", (2208, 2244), False, 'import pytest\n'), ((2428, 2487), 'pytest.raises', 'pytest.raises', (['NotFittedError'], {'match': '"""must be fitted first"""'}), "(NotFittedError, match='must be fitted first')\n", (2441, 2487), False, 'import pytest\n'), ((2653, 2724), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""`x` must contain at least one sample"""'}), "(ValueError, match='`x` must contain at least one sample')\n", (2666, 2724), False, 'import pytest\n'), ((2846, 2895), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""`x` must be 2D"""'}), "(ValueError, 
match='`x` must be 2D')\n", (2859, 2895), False, 'import pytest\n'), ((3084, 3188), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""`x` must have the same number of dimensions as the training data"""'}), "(ValueError, match=\n '`x` must have the same number of dimensions as the training data')\n", (3097, 3188), False, 'import pytest\n'), ((792, 817), 'GPy.kern.RBF', 'GPy.kern.RBF', ([], {'input_dim': '(1)'}), '(input_dim=1)\n', (804, 817), False, 'import GPy\n'), ((619, 633), 'sklearn.gaussian_process.kernels.Matern', 'Matern', ([], {'nu': '(1.5)'}), '(nu=1.5)\n', (625, 633), False, 'from sklearn.gaussian_process.kernels import Matern\n'), ((1440, 1452), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1448, 1452), True, 'import numpy as np\n'), ((1456, 1471), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (1464, 1471), True, 'import numpy as np\n'), ((1654, 1671), 'numpy.array', 'np.array', (['[[1.0]]'], {}), '([[1.0]])\n', (1662, 1671), True, 'import numpy as np\n'), ((1675, 1687), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1683, 1687), True, 'import numpy as np\n'), ((1917, 1934), 'numpy.array', 'np.array', (['[[1.0]]'], {}), '([[1.0]])\n', (1925, 1934), True, 'import numpy as np\n'), ((1938, 1958), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (1946, 1958), True, 'import numpy as np\n'), ((2097, 2116), 'numpy.array', 'np.array', (['[[[1.0]]]'], {}), '([[[1.0]]])\n', (2105, 2116), True, 'import numpy as np\n'), ((2120, 2135), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (2128, 2135), True, 'import numpy as np\n'), ((2274, 2291), 'numpy.array', 'np.array', (['[[1.0]]'], {}), '([[1.0]])\n', (2282, 2291), True, 'import numpy as np\n'), ((2295, 2312), 'numpy.array', 'np.array', (['[[1.0]]'], {}), '([[1.0]])\n', (2303, 2312), True, 'import numpy as np\n'), ((2766, 2778), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2774, 2778), True, 'import numpy as np\n'), ((2937, 2952), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (2945, 2952), True, 'import numpy as np\n'), ((3260, 3282), 'numpy.array', 'np.array', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (3268, 3282), True, 'import numpy as np\n')] |
"""Tests the timer module."""
import unittest
import time
# from pasypy import formula_parser, variables
from pasypy.timer import Timer
class TestTimer(unittest.TestCase):
"""Tests the timer module."""
@classmethod
def setUpClass(cls):
cls.timer = Timer()
def test_timer(self):
"""Checks if calculation of time is correct and precisely."""
self.assertEqual(0, self.timer.get_time('Computation'))
self.timer.create_timestamp('Computation')
time.sleep(0.02)
self.timer.calculate_time('Computation')
self.assertAlmostEqual(0.02, self.timer.get_time('Computation'), places=2)
self.assertEqual(0, self.timer.get_time('Visualization'))
self.timer.create_timestamp('Visualization')
time.sleep(0.01)
self.timer.calculate_time('Visualization')
self.assertAlmostEqual(0.01, self.timer.get_time('Visualization'), places=2)
def test_invalid_key(self):
"""Checks if invalid keys are rejected."""
with self.assertRaises(KeyError):
self.assertEqual(0, self.timer.get_time('Invalid Key'))
self.timer.create_timestamp('Invalid Key')
time.sleep(0.01)
self.timer.calculate_time('Invalid Key')
self.assertAlmostEqual(0.01, self.timer.get_time('Invalid Key'), places=2)
| [
"time.sleep",
"pasypy.timer.Timer"
] | [((273, 280), 'pasypy.timer.Timer', 'Timer', ([], {}), '()\n', (278, 280), False, 'from pasypy.timer import Timer\n'), ((501, 517), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (511, 517), False, 'import time\n'), ((778, 794), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (788, 794), False, 'import time\n'), ((1192, 1208), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1202, 1208), False, 'import time\n')] |
from io import StringIO
from unittest import TestCase, mock
from subrip_ranger import subtitles, timecodes
class TestBase:
SUBSECTIONS = [
'1\n'
'00:00:01,000 --> 00:00:02,000\n'
'subtext 1/1\n'
'subtext 1/2\n',
'2\n'
'00:00:04,000 --> 00:00:05,000\n'
'subtext 2/1\n'
'subtext 2/2\n',
]
START_INDEX = 1
class SubtitleTest(TestBase, TestCase):
def setUp(self):
self.subsection = self.SUBSECTIONS[0]
self.subtitle = subtitles.Subtitle(self.START_INDEX)
self.subtitle.parse(self.subsection)
def test_parsing(self):
"""parsing results expected values"""
self.assertEqual(self.subtitle.index, self.START_INDEX)
self.assertEqual(
self.subtitle.timecode_line.appearance.value.total_seconds(), 1
)
self.assertEqual(
self.subtitle.timecode_line.disappearance.value.total_seconds(), 2
)
self.assertEqual(len(self.subtitle.sublines), 2)
@mock.patch.object(timecodes.TimecodeLine, 'adjust')
def test_adjustment(self, mock_adjust):
"""adjustment called on timecode_line instance"""
self.subtitle.adjust()
mock_adjust.assert_called_once()
def test_formatting(self):
"""formatting unadjusted subtitle matches with original"""
self.assertEqual(str(self.subtitle), self.subsection)
class SubtitleSequenceTest(TestBase, TestCase):
def setUp(self):
self.subsections = '\n'.join(self.SUBSECTIONS)
self.input = StringIO(self.subsections)
self.subtitle_sequence = subtitles.SubtitleSequence(self.START_INDEX)
self.subtitle_sequence.parse(self.input)
def test_parsing(self):
"""subtitle entries fetched properly"""
result = len(self.subtitle_sequence.subtitles)
self.assertEqual(result, len(self.SUBSECTIONS))
@mock.patch.object(subtitles.Subtitle, 'adjust')
def test_adjustment(self, mock_adjust):
"""adjustment forwarded to each Subtitle instances"""
self.subtitle_sequence.adjust()
self.assertEqual(mock_adjust.call_count, len(self.SUBSECTIONS))
def test_writing(self):
"""output of unadjusted subtitles matches with original content"""
mock_file = mock.Mock()
self.subtitle_sequence.write(mock_file)
mock_file.write.assert_called_once_with(self.subsections)
| [
"subrip_ranger.subtitles.SubtitleSequence",
"unittest.mock.Mock",
"io.StringIO",
"unittest.mock.patch.object",
"subrip_ranger.subtitles.Subtitle"
] | [((1027, 1078), 'unittest.mock.patch.object', 'mock.patch.object', (['timecodes.TimecodeLine', '"""adjust"""'], {}), "(timecodes.TimecodeLine, 'adjust')\n", (1044, 1078), False, 'from unittest import TestCase, mock\n'), ((1910, 1957), 'unittest.mock.patch.object', 'mock.patch.object', (['subtitles.Subtitle', '"""adjust"""'], {}), "(subtitles.Subtitle, 'adjust')\n", (1927, 1957), False, 'from unittest import TestCase, mock\n'), ((516, 552), 'subrip_ranger.subtitles.Subtitle', 'subtitles.Subtitle', (['self.START_INDEX'], {}), '(self.START_INDEX)\n', (534, 552), False, 'from subrip_ranger import subtitles, timecodes\n'), ((1562, 1588), 'io.StringIO', 'StringIO', (['self.subsections'], {}), '(self.subsections)\n', (1570, 1588), False, 'from io import StringIO\n'), ((1622, 1666), 'subrip_ranger.subtitles.SubtitleSequence', 'subtitles.SubtitleSequence', (['self.START_INDEX'], {}), '(self.START_INDEX)\n', (1648, 1666), False, 'from subrip_ranger import subtitles, timecodes\n'), ((2300, 2311), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (2309, 2311), False, 'from unittest import TestCase, mock\n')] |
import unittest
from sodasql.common.yaml_helper import YamlHelper
class YamlHelperTest(unittest.TestCase):
def test_valid_numeric_value(self):
value = YamlHelper.validate_numeric_value("column_name", "key", 2)
self.assertEqual(value, 2)
def test_valid_array_value(self):
value = YamlHelper.validate_list_value("column_name", "key", [1, 2, 3])
self.assertEqual(value, [1, 2, 3])
def test_invalid_numeric_value(self):
value = YamlHelper.validate_numeric_value("column_name", "key", None)
self.assertEqual(value, None)
def test_invalid_array_value(self):
value = YamlHelper.validate_list_value("column_name", "key", None)
self.assertEqual(value, None)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"sodasql.common.yaml_helper.YamlHelper.validate_list_value",
"sodasql.common.yaml_helper.YamlHelper.validate_numeric_value"
] | [((768, 783), 'unittest.main', 'unittest.main', ([], {}), '()\n', (781, 783), False, 'import unittest\n'), ((165, 223), 'sodasql.common.yaml_helper.YamlHelper.validate_numeric_value', 'YamlHelper.validate_numeric_value', (['"""column_name"""', '"""key"""', '(2)'], {}), "('column_name', 'key', 2)\n", (198, 223), False, 'from sodasql.common.yaml_helper import YamlHelper\n'), ((314, 377), 'sodasql.common.yaml_helper.YamlHelper.validate_list_value', 'YamlHelper.validate_list_value', (['"""column_name"""', '"""key"""', '[1, 2, 3]'], {}), "('column_name', 'key', [1, 2, 3])\n", (344, 377), False, 'from sodasql.common.yaml_helper import YamlHelper\n'), ((480, 541), 'sodasql.common.yaml_helper.YamlHelper.validate_numeric_value', 'YamlHelper.validate_numeric_value', (['"""column_name"""', '"""key"""', 'None'], {}), "('column_name', 'key', None)\n", (513, 541), False, 'from sodasql.common.yaml_helper import YamlHelper\n'), ((637, 695), 'sodasql.common.yaml_helper.YamlHelper.validate_list_value', 'YamlHelper.validate_list_value', (['"""column_name"""', '"""key"""', 'None'], {}), "('column_name', 'key', None)\n", (667, 695), False, 'from sodasql.common.yaml_helper import YamlHelper\n')] |
import collections.abc
import copy
from typing import Union, Dict, List, Any
def create_dependency(key, value):
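    # Build an "is_enabled_<key>" Yes/No guard property and a oneOf dependency that requires the original property only when the guard is "Yes".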
newkey = f"is_enabled_{key}"
title = value.get("title", key)
newvalue = {
"title": f"Enable {title}",
"default": "No",
"enum": ["No", "Yes"],
}
dep = {
"oneOf": [
{"properties": {newkey: {"enum": ["No"]}}},
{"properties": {newkey: {"enum": ["Yes"]}, key: value}, "required": [key]},
]
}
return newkey, newvalue, dep
def remove_schema_properties(schema: Dict[str, Any], properties: List[str]) -> None:
schemaprops = schema.get("properties", {})
for prop in properties:
schemaprops.pop(prop, None)
def add_boolean_guards_for_schema_properties(
schema: Dict[str, Any], properties: List[str]
) -> None:
if "dependencies" not in schema:
schema["dependencies"] = {}
props = schema.get("properties", {})
new_props = {}
for key, value in props.items():
if key in properties:
newkey, newvalue, dep = create_dependency(key, value)
new_props[newkey] = newvalue
schema["dependencies"][newkey] = dep
else:
new_props[key] = value
schema["properties"] = new_props
def replacekey(input: Union[Dict, List, object]) -> Any:
replacements = {"anyOf": "oneOf"}
if isinstance(input, dict):
# For every key in the dict, get either a replacement, or the key itself.
# Call this function recursively for values.
return {replacements.get(k, k): replacekey(v) for k, v in input.items()}
elif isinstance(input, list):
return [replacekey(item) for item in input]
else:
return input
def replacevalue(input: Union[Dict, List, object]) -> Any:
if isinstance(input, dict):
return {k: replacevalue(input[k]) for k in input}
elif isinstance(input, list):
return [replacevalue(item) for item in input]
else:
replacements = {"integer": "number"}
return replacements.get(input, input)
def addsibling(input: Union[Dict, List, object]) -> Any:
if isinstance(input, dict):
d = {k: addsibling(input[k]) for k in input}
if "oneOf" in input:
d["type"] = "object"
return d
elif isinstance(input, list):
return [addsibling(item) for item in input]
else:
return input
def delsibling(input: Union[Dict, List, object], siblings: Dict[str, str]) -> Any:
if isinstance(input, dict):
d = {k: delsibling(input[k], siblings) for k in input}
for key, value in siblings.items():
if key in input:
d.pop(value, None)
return d
elif isinstance(input, list):
return [delsibling(item, siblings) for item in input]
else:
return input
def getref(path: str, context: Dict):
"""Recursively returns nested items from a dict."""
if not path.startswith("#/"):
raise ValueError("ref path does not start with #/")
items = path[2:].split("/")
def recursive_get(keys, structure):
if len(keys) == 0:
return structure
else:
return recursive_get(keys[1:], structure[keys[0]])
return recursive_get(items, context)
def copytitle(input, context):
"""Copies "title" from "$ref" into oneOf."""
if isinstance(input, dict):
output = {}
for key in input:
if key == "oneOf":
initems = input[key]
outitems = []
for initem in initems:
outitem = copy.deepcopy(initem)
if "$ref" in initem and not "title" in initem:
ref = getref(initem["$ref"], context)
default_title = initem["$ref"].split("/")[-1]
outitem["title"] = ref.get("title", default_title)
outitems.append(outitem)
output[key] = outitems
else:
output[key] = copytitle(input[key], context)
return output
elif isinstance(input, list):
return [copytitle(item, context) for item in input]
else:
return input
def replaceenum(input: Union[Dict, List, object]) -> Any:
"""Replace singleton enums with const."""
    if isinstance(input, collections.abc.Mapping):
d1 = {
k: replaceenum(v)
for k, v in input.items()
if not (k == "enum" and isinstance(v, list) and len(v) == 1)
}
d2 = {
"const": v[0]
for k, v in input.items()
if (k == "enum" and isinstance(v, list) and len(v) == 1)
}
return {**d1, **d2} # Merge two dicts: https://stackoverflow.com/a/26853961
elif isinstance(input, list):
return [replaceenum(item) for item in input]
else:
return input
def addtitles(schema):
if isinstance(schema, dict):
for name, prop in schema.get("properties", {}).items():
prop["title"] = prop.get("title", name.capitalize()) # or whatever
for value in schema.values():
addtitles(value)
elif isinstance(schema, list):
for elt in schema:
addtitles(elt) | [
"copy.deepcopy"
] | [((3609, 3630), 'copy.deepcopy', 'copy.deepcopy', (['initem'], {}), '(initem)\n', (3622, 3630), False, 'import copy\n')] |
#!/usr/bin/env python
# Copyright (c) 2014, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import cv
import cv_bridge
import PIL
'''~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PIL/cv interaction
# The functions in this secion handle interactions between
# PIL and cvmat formats.
# **gen_msg() converts an RGB PIL image to a BGR cvmat image
# **rgb_to_bgr() converts an RGB PIL image to a BGR PIL image
# **PIL_to_cv() converts a BGR PIL image to a cvmat image
# **cv_to_msg() converts a cvmat image to a rosmsg format
# **msg_to_cv() converts a rosmsg image to a cvmat image
# **overlay() takes an original image, the size of that image,
# a rectangle defined in that original image, and a new
# new image and returns the original image with the new
# image overlayed in the defined rectangle.
# For the moment, this is mainly used for the Camera
# Display demo, overlaying the selected camera's image
# onto the UI display
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'''
def gen_cv(img):
return PIL_to_cv(rgb_to_bgr(img))
def rgb_to_bgr(img):
r, g, b = img.split()
return PIL.Image.merge('RGB', (b, g, r))
def PIL_to_cv(img):
ci = cv.CreateImage((1024, 600), cv.IPL_DEPTH_8U, 3)
cv.SetData(ci, img.tostring(), 3072)
return ci
def cv_to_msg(img):
return cv_bridge.CvBridge().cv_to_imgmsg(img, encoding='bgr8')
def msg_to_cv(img):
return cv_bridge.CvBridge().imgmsg_to_cv(img, desired_encoding='bgr8')
def overlay(old_img, new_img, original_size, new_rect):
tmp = cv.CreateImage(original_size, cv.IPL_DEPTH_8U, 3)
cv.Copy(old_img, tmp)
sub = cv.GetSubRect(tmp, new_rect)
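    # GetSubRect returns a view into tmp, so copying into it pastes the new image inside the rectangle.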
cv_img = msg_to_cv(new_img)
cv.Copy(cv_img, sub)
return cv_to_msg(tmp)
| [
"cv.CreateImage",
"cv_bridge.CvBridge",
"cv.GetSubRect",
"cv.Copy",
"PIL.Image.merge"
] | [((2638, 2671), 'PIL.Image.merge', 'PIL.Image.merge', (['"""RGB"""', '(b, g, r)'], {}), "('RGB', (b, g, r))\n", (2653, 2671), False, 'import PIL\n'), ((2703, 2750), 'cv.CreateImage', 'cv.CreateImage', (['(1024, 600)', 'cv.IPL_DEPTH_8U', '(3)'], {}), '((1024, 600), cv.IPL_DEPTH_8U, 3)\n', (2717, 2750), False, 'import cv\n'), ((3060, 3109), 'cv.CreateImage', 'cv.CreateImage', (['original_size', 'cv.IPL_DEPTH_8U', '(3)'], {}), '(original_size, cv.IPL_DEPTH_8U, 3)\n', (3074, 3109), False, 'import cv\n'), ((3114, 3135), 'cv.Copy', 'cv.Copy', (['old_img', 'tmp'], {}), '(old_img, tmp)\n', (3121, 3135), False, 'import cv\n'), ((3146, 3174), 'cv.GetSubRect', 'cv.GetSubRect', (['tmp', 'new_rect'], {}), '(tmp, new_rect)\n', (3159, 3174), False, 'import cv\n'), ((3211, 3231), 'cv.Copy', 'cv.Copy', (['cv_img', 'sub'], {}), '(cv_img, sub)\n', (3218, 3231), False, 'import cv\n'), ((2839, 2859), 'cv_bridge.CvBridge', 'cv_bridge.CvBridge', ([], {}), '()\n', (2857, 2859), False, 'import cv_bridge\n'), ((2928, 2948), 'cv_bridge.CvBridge', 'cv_bridge.CvBridge', ([], {}), '()\n', (2946, 2948), False, 'import cv_bridge\n')] |
import logging
import sys
import time
import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
logger = logging.getLogger(__name__)
def delete_odl_resources(odl_ip, odl_port, resource):
rsrc_list = odl_utils.get_odl_resource_list(odl_ip, odl_port, resource)
elem_names = odl_utils.odl_resource_list_names(resource, rsrc_list)
for elem in elem_names:
logger.info("Removing ODL resource: {0}/{1}".format(resource, elem))
odl_utils.delete_odl_resource_elem(odl_ip, odl_port, resource, elem)
def delete_odl_ietf_access_lists(odl_ip, odl_port):
acl_list = odl_utils.get_odl_acl_list(odl_ip, odl_port)
acl_types_names = odl_utils.odl_acl_types_names(acl_list)
for acl_type, acl_name in acl_types_names:
odl_utils.delete_odl_acl(odl_ip, odl_port, acl_type, acl_name)
def delete_vnfds():
t = os_sfc_utils.get_tacker_client()
vnfds = os_sfc_utils.list_vnfds(t)
if vnfds is None:
return
for vnfd in vnfds:
logger.info("Removing vnfd: {0}".format(vnfd))
os_sfc_utils.delete_vnfd(t, vnfd_id=vnfd)
def delete_vnfs():
t = os_sfc_utils.get_tacker_client()
vnfs = os_sfc_utils.list_vnfs(t)
if vnfs is None:
return
for vnf in vnfs:
logger.info("Removing vnf: {0}".format(vnf))
os_sfc_utils.delete_vnf(t, vnf_id=vnf)
def delete_vnffgs():
t = os_sfc_utils.get_tacker_client()
vnffgs = os_sfc_utils.list_vnffgs(t)
if vnffgs is None:
return
for vnffg in reversed(vnffgs):
logger.info("Removing vnffg: {0}".format(vnffg))
os_sfc_utils.delete_vnffg(t, vnffg_id=vnffg)
def delete_vnffgds():
t = os_sfc_utils.get_tacker_client()
vnffgds = os_sfc_utils.list_vnffgds(t)
if vnffgds is None:
return
for vnffgd in vnffgds:
logger.info("Removing vnffgd: {0}".format(vnffgd))
os_sfc_utils.delete_vnffgd(t, vnffgd_id=vnffgd)
def delete_vims():
t = os_sfc_utils.get_tacker_client()
vims = os_sfc_utils.list_vims(t)
if vims is None:
return
for vim in vims:
logger.info("Removing vim: {0}".format(vim))
os_sfc_utils.delete_vim(t, vim_id=vim)
# Creators is a list full of SNAPs objects
def delete_openstack_objects(creators):
for creator in reversed(creators):
try:
creator.clean()
except Exception as e:
logger.error('Unexpected error cleaning - %s', e)
# Networking-odl generates a new security group when creating a router
# which is not tracked by SNAPs
def delete_untracked_security_groups():
openstack_sfc = os_sfc_utils.OpenStackSFC()
openstack_sfc.delete_all_security_groups()
def cleanup_odl(odl_ip, odl_port):
delete_odl_resources(odl_ip, odl_port, 'service-function-forwarder')
delete_odl_resources(odl_ip, odl_port, 'service-function-chain')
delete_odl_resources(odl_ip, odl_port, 'service-function-path')
delete_odl_resources(odl_ip, odl_port, 'service-function')
delete_odl_ietf_access_lists(odl_ip, odl_port)
def cleanup(creators, odl_ip=None, odl_port=None):
delete_vnffgs()
delete_vnffgds()
delete_vnfs()
time.sleep(20)
delete_vnfds()
delete_vims()
delete_openstack_objects(creators)
delete_untracked_security_groups()
if odl_ip is not None and odl_port is not None:
cleanup_odl(odl_ip, odl_port)
def cleanup_from_bash(odl_ip=None, odl_port=None):
delete_vnffgs()
delete_vnffgds()
delete_vnfs()
time.sleep(20)
delete_vnfds()
delete_vims()
if odl_ip is not None and odl_port is not None:
cleanup_odl(odl_ip, odl_port)
if __name__ == '__main__':
if len(sys.argv) > 2:
cleanup_from_bash(sys.argv[1], sys.argv[2])
| [
"logging.getLogger",
"time.sleep",
"sfc.lib.openstack_utils.delete_vnf",
"sfc.lib.openstack_utils.delete_vnffgd",
"sfc.lib.odl_utils.odl_resource_list_names",
"sfc.lib.odl_utils.odl_acl_types_names",
"sfc.lib.odl_utils.delete_odl_acl",
"sfc.lib.odl_utils.get_odl_acl_list",
"sfc.lib.odl_utils.delete_odl_resource_elem",
"sfc.lib.openstack_utils.list_vnfds",
"sfc.lib.openstack_utils.list_vims",
"sfc.lib.openstack_utils.get_tacker_client",
"sfc.lib.openstack_utils.OpenStackSFC",
"sfc.lib.odl_utils.get_odl_resource_list",
"sfc.lib.openstack_utils.list_vnfs",
"sfc.lib.openstack_utils.delete_vim",
"sfc.lib.openstack_utils.list_vnffgs",
"sfc.lib.openstack_utils.delete_vnffg",
"sfc.lib.openstack_utils.delete_vnfd",
"sfc.lib.openstack_utils.list_vnffgds"
] | [((134, 161), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (151, 161), False, 'import logging\n'), ((234, 293), 'sfc.lib.odl_utils.get_odl_resource_list', 'odl_utils.get_odl_resource_list', (['odl_ip', 'odl_port', 'resource'], {}), '(odl_ip, odl_port, resource)\n', (265, 293), True, 'import sfc.lib.odl_utils as odl_utils\n'), ((311, 365), 'sfc.lib.odl_utils.odl_resource_list_names', 'odl_utils.odl_resource_list_names', (['resource', 'rsrc_list'], {}), '(resource, rsrc_list)\n', (344, 365), True, 'import sfc.lib.odl_utils as odl_utils\n'), ((617, 661), 'sfc.lib.odl_utils.get_odl_acl_list', 'odl_utils.get_odl_acl_list', (['odl_ip', 'odl_port'], {}), '(odl_ip, odl_port)\n', (643, 661), True, 'import sfc.lib.odl_utils as odl_utils\n'), ((684, 723), 'sfc.lib.odl_utils.odl_acl_types_names', 'odl_utils.odl_acl_types_names', (['acl_list'], {}), '(acl_list)\n', (713, 723), True, 'import sfc.lib.odl_utils as odl_utils\n'), ((872, 904), 'sfc.lib.openstack_utils.get_tacker_client', 'os_sfc_utils.get_tacker_client', ([], {}), '()\n', (902, 904), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((917, 943), 'sfc.lib.openstack_utils.list_vnfds', 'os_sfc_utils.list_vnfds', (['t'], {}), '(t)\n', (940, 943), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((1138, 1170), 'sfc.lib.openstack_utils.get_tacker_client', 'os_sfc_utils.get_tacker_client', ([], {}), '()\n', (1168, 1170), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((1182, 1207), 'sfc.lib.openstack_utils.list_vnfs', 'os_sfc_utils.list_vnfs', (['t'], {}), '(t)\n', (1204, 1207), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((1396, 1428), 'sfc.lib.openstack_utils.get_tacker_client', 'os_sfc_utils.get_tacker_client', ([], {}), '()\n', (1426, 1428), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((1442, 1469), 'sfc.lib.openstack_utils.list_vnffgs', 'os_sfc_utils.list_vnffgs', (['t'], {}), '(t)\n', (1466, 1469), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((1685, 1717), 'sfc.lib.openstack_utils.get_tacker_client', 'os_sfc_utils.get_tacker_client', ([], {}), '()\n', (1715, 1717), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((1732, 1760), 'sfc.lib.openstack_utils.list_vnffgds', 'os_sfc_utils.list_vnffgds', (['t'], {}), '(t)\n', (1757, 1760), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((1971, 2003), 'sfc.lib.openstack_utils.get_tacker_client', 'os_sfc_utils.get_tacker_client', ([], {}), '()\n', (2001, 2003), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((2015, 2040), 'sfc.lib.openstack_utils.list_vims', 'os_sfc_utils.list_vims', (['t'], {}), '(t)\n', (2037, 2040), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((2621, 2648), 'sfc.lib.openstack_utils.OpenStackSFC', 'os_sfc_utils.OpenStackSFC', ([], {}), '()\n', (2646, 2648), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((3173, 3187), 'time.sleep', 'time.sleep', (['(20)'], {}), '(20)\n', (3183, 3187), False, 'import time\n'), ((3509, 3523), 'time.sleep', 'time.sleep', (['(20)'], {}), '(20)\n', (3519, 3523), False, 'import time\n'), ((479, 547), 'sfc.lib.odl_utils.delete_odl_resource_elem', 'odl_utils.delete_odl_resource_elem', (['odl_ip', 'odl_port', 'resource', 'elem'], {}), '(odl_ip, odl_port, resource, elem)\n', (513, 547), True, 'import sfc.lib.odl_utils as odl_utils\n'), ((779, 841), 'sfc.lib.odl_utils.delete_odl_acl', 'odl_utils.delete_odl_acl', (['odl_ip', 'odl_port', 'acl_type', 'acl_name'], {}), '(odl_ip, odl_port, acl_type, acl_name)\n', (803, 841), True, 'import sfc.lib.odl_utils as odl_utils\n'), ((1067, 1108), 'sfc.lib.openstack_utils.delete_vnfd', 'os_sfc_utils.delete_vnfd', (['t'], {'vnfd_id': 'vnfd'}), '(t, vnfd_id=vnfd)\n', (1091, 1108), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((1326, 1364), 'sfc.lib.openstack_utils.delete_vnf', 'os_sfc_utils.delete_vnf', (['t'], {'vnf_id': 'vnf'}), '(t, vnf_id=vnf)\n', (1349, 1364), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((1608, 1652), 'sfc.lib.openstack_utils.delete_vnffg', 'os_sfc_utils.delete_vnffg', (['t'], {'vnffg_id': 'vnffg'}), '(t, vnffg_id=vnffg)\n', (1633, 1652), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((1894, 1941), 'sfc.lib.openstack_utils.delete_vnffgd', 'os_sfc_utils.delete_vnffgd', (['t'], {'vnffgd_id': 'vnffgd'}), '(t, vnffgd_id=vnffgd)\n', (1920, 1941), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((2159, 2197), 'sfc.lib.openstack_utils.delete_vim', 'os_sfc_utils.delete_vim', (['t'], {'vim_id': 'vim'}), '(t, vim_id=vim)\n', (2182, 2197), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n')]
# Generated by Django 2.0.2 on 2018-02-02 05:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('webapp', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='utility',
name='pub_date',
),
]
| [
"django.db.migrations.RemoveField"
] | [((215, 276), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""utility"""', 'name': '"""pub_date"""'}), "(model_name='utility', name='pub_date')\n", (237, 276), False, 'from django.db import migrations\n')] |
#
# Copyright 2018-2020 Clement
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import os
import pytest
from flask import request
from PIL import Image
from flaskr.map import create_static_map
from flaskr.map import gpx_to_simplified_geojson
@pytest.mark.parametrize(
"map_type",
(
"viewer",
"player",
),
)
def test_viewers(files, client, app, auth, map_type):
"""
Test the map viewer (2D) and map player (3D).
"""
# unknown book ID:
rv = client.get("/map/" + map_type + "/42/first_story/test_Gillespie_Circuit/nz")
assert rv.status_code == 404
# unknown book name:
rv = client.get("/map/" + map_type + "/1/bad_story/test_Gillespie_Circuit/nz")
assert rv.status_code == 404
# restricted book (access level = 1):
rv = client.get("/map/" + map_type + "/4/fourth_story/test_Gillespie_Circuit/fr")
assert rv.status_code == 404
# bad country code:
rv = client.get("/map/" + map_type + "/1/first_story/test_Unknown/hell")
assert rv.status_code == 404
with client:
# check granted access and raw GPX download link access:
auth.login()
rv = client.get(
"/map/" + map_type + "/4/fourth_story/test_Gillespie_Circuit/fr"
)
assert rv.status_code == 200
assert b'href="/stories/4/test_Gillespie_Circuit.gpx"' not in rv.data
# actual GPX download tested in test_restricted_access.py
auth.logout()
auth.login("<EMAIL>", "admin")
rv = client.get(
"/map/" + map_type + "/4/fourth_story/test_Gillespie_Circuit/fr"
)
assert rv.status_code == 200
assert b'href="/stories/4/test_Gillespie_Circuit.gpx"' in rv.data
auth.logout()
# check unrestricted track:
rv = client.get("/map/" + map_type + "/1/first_story/test_Gillespie_Circuit/nz")
assert rv.status_code == 200
# test map.get_thumbnail_path():
static_map_url = (
request.url_root.encode() + b"map/static_map/1/test_Gillespie_Circuit.jpg"
)
assert (
b'meta name="twitter:image" property="og:image" content="'
+ static_map_url
+ b'"'
in rv.data
)
def test_gpx_to_simplified_geojson(files):
"""
Test the GPX to GeoJSON conversion.
"""
with open("test_gpx_to_geojson.expected_output.geojson") as expected_output:
assert expected_output.read() == gpx_to_simplified_geojson(
"test_gpx_to_geojson.gpx"
)
@pytest.mark.parametrize(
"track_type",
(
"geojson",
"webtrack",
),
)
def test_export(files, client, app, auth, track_type):
"""
Test the WebTrack access.
"""
# unknown book:
rv = client.get("/map/" + track_type + "s/42/test_Gillespie_Circuit." + track_type)
assert rv.status_code == 404
# restricted book:
rv = client.get("/map/" + track_type + "s/4/my_track." + track_type)
assert rv.status_code == 404
# unknown track:
rv = client.get("/map/" + track_type + "s/1/test_Unknown." + track_type)
assert rv.status_code == 404
# empty GPX file:
auth.login()
with pytest.raises(EOFError, match="GPX file is empty"):
rv = client.get("/map/" + track_type + "s/4/my_track." + track_type)
auth.logout()
# all good:
track_filename = "test_Gillespie_Circuit." + track_type
rv = client.get("/map/" + track_type + "s/1/" + track_filename)
assert rv.status_code == 200
with app.app_context():
track_path = os.path.join(
app.config["SHELF_FOLDER"], "first_story", track_filename
)
assert os.path.isfile(track_path)
if track_type == "webtrack":
webtrack_header = b"webtrack-bin"
assert open(track_path, "rb").read(len(webtrack_header)) == webtrack_header
elif track_type == "geojson":
geojson_header = '{"type":"FeatureCollection","features":['
assert open(track_path, "r").read(len(geojson_header)) == geojson_header
def test_static_map(files, client, auth):
"""
Test the static map.
"""
# unknown book:
rv = client.get("/map/static_map/42/test_Gillespie_Circuit.jpg")
assert rv.status_code == 404
# restricted book:
rv = client.get("/map/static_map/4/my_track.jpg")
assert rv.status_code == 404
# unknown track:
rv = client.get("/map/static_map/1/test_Unknown.jpg")
assert rv.status_code == 404
# empty GPX file:
auth.login()
with pytest.raises(EOFError, match="GPX file is empty"):
rv = client.get("/map/static_map/4/my_track.jpg")
auth.logout()
# all good:
rv = client.get("/map/static_map/1/test_Gillespie_Circuit.jpg")
assert rv.status_code == 200
def test_create_static_map(app):
with app.app_context():
static_image = "Gillespie_Circuit.jpeg"
create_static_map(
"Gillespie_Circuit.gpx", static_image, app.config["MAPBOX_STATIC_IMAGES"]
)
with Image.open(static_image) as im:
assert im.format == "JPEG"
assert im.size == (1600, 1000)
assert im.mode == "RGB"
os.remove(static_image)
@pytest.mark.parametrize(
"path",
(
# LDS aerial:
"/map/middleware/lds/set=2/a/0/0/0",
# LDS topo:
"/map/middleware/lds/layer=767/a/0/0/0",
# IGN aerial:
"/map/middleware/ign?layer=GEOGRAPHICALGRIDSYSTEMS.MAPS&style=normal&tilematrixset=PM&Service=WMTS&Request"
"=GetTile&Version=1.0.0&Format=image/jpeg&TileMatrix=11&TileCol=1023&TileRow=753",
# IGN topo:
"/map/middleware/ign?layer=ORTHOIMAGERY.ORTHOPHOTOS&style=normal&tilematrixset=PM&Service=WMTS&Request"
"=GetTile&Version=1.0.0&Format=image%2Fjpeg&TileMatrix=11&TileCol=1026&TileRow=753",
),
)
def test_map_proxy_link_ok(client, path):
""" Check the links availability. """
rv = client.get(path)
assert rv.status_code == 200
| [
"flaskr.map.gpx_to_simplified_geojson",
"PIL.Image.open",
"os.path.join",
"flaskr.map.create_static_map",
"os.path.isfile",
"pytest.mark.parametrize",
"pytest.raises",
"flask.request.url_root.encode",
"os.remove"
] | [((1693, 1750), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""map_type"""', "('viewer', 'player')"], {}), "('map_type', ('viewer', 'player'))\n", (1716, 1750), False, 'import pytest\n'), ((3988, 4050), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""track_type"""', "('geojson', 'webtrack')"], {}), "('track_type', ('geojson', 'webtrack'))\n", (4011, 4050), False, 'import pytest\n'), ((6672, 7179), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""path"""', "('/map/middleware/lds/set=2/a/0/0/0',\n '/map/middleware/lds/layer=767/a/0/0/0',\n '/map/middleware/ign?layer=GEOGRAPHICALGRIDSYSTEMS.MAPS&style=normal&tilematrixset=PM&Service=WMTS&Request=GetTile&Version=1.0.0&Format=image/jpeg&TileMatrix=11&TileCol=1023&TileRow=753'\n ,\n '/map/middleware/ign?layer=ORTHOIMAGERY.ORTHOPHOTOS&style=normal&tilematrixset=PM&Service=WMTS&Request=GetTile&Version=1.0.0&Format=image%2Fjpeg&TileMatrix=11&TileCol=1026&TileRow=753'\n )"], {}), "('path', ('/map/middleware/lds/set=2/a/0/0/0',\n '/map/middleware/lds/layer=767/a/0/0/0',\n '/map/middleware/ign?layer=GEOGRAPHICALGRIDSYSTEMS.MAPS&style=normal&tilematrixset=PM&Service=WMTS&Request=GetTile&Version=1.0.0&Format=image/jpeg&TileMatrix=11&TileCol=1023&TileRow=753'\n ,\n '/map/middleware/ign?layer=ORTHOIMAGERY.ORTHOPHOTOS&style=normal&tilematrixset=PM&Service=WMTS&Request=GetTile&Version=1.0.0&Format=image%2Fjpeg&TileMatrix=11&TileCol=1026&TileRow=753'\n ))\n", (6695, 7179), False, 'import pytest\n'), ((4638, 4688), 'pytest.raises', 'pytest.raises', (['EOFError'], {'match': '"""GPX file is empty"""'}), "(EOFError, match='GPX file is empty')\n", (4651, 4688), False, 'import pytest\n'), ((5012, 5083), 'os.path.join', 'os.path.join', (["app.config['SHELF_FOLDER']", '"""first_story"""', 'track_filename'], {}), "(app.config['SHELF_FOLDER'], 'first_story', track_filename)\n", (5024, 5083), False, 'import os\n'), ((5121, 5147), 'os.path.isfile', 'os.path.isfile', (['track_path'], {}), '(track_path)\n', (5135, 5147), False, 'import os\n'), ((5994, 6044), 'pytest.raises', 'pytest.raises', (['EOFError'], {'match': '"""GPX file is empty"""'}), "(EOFError, match='GPX file is empty')\n", (6007, 6044), False, 'import pytest\n'), ((6359, 6456), 'flaskr.map.create_static_map', 'create_static_map', (['"""Gillespie_Circuit.gpx"""', 'static_image', "app.config['MAPBOX_STATIC_IMAGES']"], {}), "('Gillespie_Circuit.gpx', static_image, app.config[\n 'MAPBOX_STATIC_IMAGES'])\n", (6376, 6456), False, 'from flaskr.map import create_static_map\n'), ((6645, 6668), 'os.remove', 'os.remove', (['static_image'], {}), '(static_image)\n', (6654, 6668), False, 'import os\n'), ((3433, 3458), 'flask.request.url_root.encode', 'request.url_root.encode', ([], {}), '()\n', (3456, 3458), False, 'from flask import request\n'), ((3910, 3962), 'flaskr.map.gpx_to_simplified_geojson', 'gpx_to_simplified_geojson', (['"""test_gpx_to_geojson.gpx"""'], {}), "('test_gpx_to_geojson.gpx')\n", (3935, 3962), False, 'from flaskr.map import gpx_to_simplified_geojson\n'), ((6487, 6511), 'PIL.Image.open', 'Image.open', (['static_image'], {}), '(static_image)\n', (6497, 6511), False, 'from PIL import Image\n')] |
from __future__ import unicode_literals
import frappe
import traceback
import unicodedata
from frappe import auth
import datetime
import json, ast
from frappe import _
import requests
@frappe.whitelist(allow_guest=True)
def lead(**kwargs):
lead =frappe.get_doc(kwargs['data'])
lead.insert()
lead_name = lead.name
frappe.db.commit()
if (lead_name):
message = frappe.response["message"] = {
"success_key": True,
"message": "تم اضافة المعاملة بنجاح!",
"lead": lead_name
}
return message
else:
return "حدث خطأ ولم نتمكن من اضافة المعاملة . برجاء المحاولة مرة اخري!"
@frappe.whitelist(allow_guest=True)
def opportunity(**kwargs):
opportunity =frappe.get_doc(kwargs['data'])
opportunity.insert()
opportunity_name = opportunity.name
frappe.db.commit()
if (opportunity_name):
message = frappe.response["message"] = {
"success_key": True,
"message": "تم اضافة المعاملة بنجاح!",
"opportunity": opportunity_name
}
return message
else:
return "حدث خطأ ولم نتمكن من اضافة المعاملة . برجاء المحاولة مرة اخري!"
@frappe.whitelist(allow_guest=True)
def quotation(**kwargs):
quotation =frappe.get_doc(kwargs['data'])
quotation.insert()
quotation_name = quotation.name
frappe.db.commit()
if (quotation_name):
message = frappe.response["message"] = {
"success_key": True,
"message": "تم اضافة المعاملة بنجاح!",
"quotation": quotation_name
}
return message
else:
return "حدث خطأ ولم نتمكن من اضافة المعاملة . برجاء المحاولة مرة اخري!"
@frappe.whitelist(allow_guest=True)
def customer(**kwargs):
customer = frappe.new_doc('Customer')
customer.customer_name =kwargs['data']['customer_name']
customer.customer_type =kwargs['data']['customer_type']
customer.customer_group =kwargs['data']['customer_group']
customer.territory = kwargs['data']['territory']
customer.market_segment = kwargs['data']['market_segment']
customer.industry = kwargs['data']['industry']
customer.tax_id = kwargs['data']['tax_id']
customer.default_currency = kwargs['data']['default_currency']
customer.default_price_list = kwargs['data']['default_price_list']
customer.default_sales_partner = kwargs['data']['default_sales_partner']
#customer.credit_limits = kwargs['data']['credit_limits']
customer.insert()
customer_name = customer.name
contact = frappe.new_doc('Contact')
contact_link = [{
"link_doctype": "Customer",
"link_name": customer_name,
"link_title": customer_name
}]
contact.first_name = kwargs['data']['customer_name']
contact.email_id = kwargs['data']['email_id']
contact.mobile_no = kwargs['data']['mobile_no']
contact.is_primary_contact = 1
contact.is_billing_contact = 1
#contact.links = contact_link
contact.insert()
address = frappe.new_doc('Address')
address_link = [{
"link_doctype": "Customer",
"link_name": customer_name,
"link_title": customer_name
}]
address.address_title = kwargs['data']['customer_name']
address.address_line1 = kwargs['data']['address_line1']
address.city = kwargs['data']['city']
address.country = kwargs['data']['country']
address.address_type = "Billing"
address.is_primary_address_type = 1
address.is_shipping_address_type = 1
#address.links = address_link
address.insert()
#customer =frappe.get_doc(kwargs['data'])
#customer.insert()
frappe.db.commit()
if (customer_name):
message = frappe.response["message"] = {
"success_key": True,
"message": "تم اضافة المعاملة بنجاح!",
"customer": customer_name
}
return message
else:
return "حدث خطأ ولم نتمكن من اضافة المعاملة . برجاء المحاولة مرة اخري!"
@frappe.whitelist(allow_guest=True)
def sales_order(**kwargs):
sales_order =frappe.get_doc(kwargs['data'])
sales_order.insert()
sales_order_name = sales_order.name
frappe.db.commit()
if (sales_order_name):
message = frappe.response["message"] = {
"success_key": True,
"message": "تم اضافة المعاملة بنجاح!",
"sales_order": sales_order_name
}
return message
else:
return "حدث خطأ ولم نتمكن من اضافة المعاملة . برجاء المحاولة مرة اخري!"
@frappe.whitelist(allow_guest=True)
def sales_invoice(**kwargs):
sales_invoice =frappe.get_doc(kwargs['data'])
sales_invoice.insert()
sales_invoice_name = sales_invoice.name
frappe.db.commit()
if (sales_invoice_name):
message = frappe.response["message"] = {
"success_key": True,
"message": "تم اضافة المعاملة بنجاح!",
"sales_invoice": sales_invoice_name
}
return message
else:
return "حدث خطأ ولم نتمكن من اضافة المعاملة . برجاء المحاولة مرة اخري!"
@frappe.whitelist(allow_guest=True)
def payment_entry(**kwargs):
payment_entry =frappe.get_doc(kwargs['data'])
payment_entry.insert()
payment_entry_name = payment_entry.name
frappe.db.commit()
if (payment_entry_name):
message = frappe.response["message"] = {
"success_key": True,
"message": "تم اضافة المعاملة بنجاح!",
"payment_entry": payment_entry_name
}
return message
else:
return "حدث خطأ ولم نتمكن من اضافة المعاملة . برجاء المحاولة مرة اخري!"
@frappe.whitelist(allow_guest=True)
def item(**kwargs):
item =frappe.get_doc(kwargs['data'])
item.insert()
item_name = item.name
frappe.db.commit()
if (item_name):
message = frappe.response["message"] = {
"success_key": True,
"message": "تم اضافة الصنف بنجاح!",
"item": item_name
}
return message
else:
return "حدث خطأ ولم نتمكن من اضافة الصنف . برجاء المحاولة مرة اخري!"
@frappe.whitelist(allow_guest=True)
def material_request(**kwargs):
material_request =frappe.get_doc(kwargs['data'])
material_request.insert()
material_request_name = material_request.name
frappe.db.commit()
if (material_request_name):
message = frappe.response["message"] = {
"success_key": True,
"message": "تم اضافة المعاملة بنجاح!",
"material_request": material_request_name
}
return message
else:
return "حدث خطأ ولم نتمكن من اضافة المعاملة . برجاء المحاولة مرة اخري!"
@frappe.whitelist(allow_guest=True)
def stock_entry(**kwargs):
stock_entry =frappe.get_doc(kwargs['data'])
stock_entry.insert()
stock_entry_name = stock_entry.name
frappe.db.commit()
if (stock_entry_name):
message = frappe.response["message"] = {
"success_key": True,
"message": "تم اضافة المعاملة بنجاح!",
"stock_entry": stock_entry_name
}
return message
else:
return "حدث خطأ ولم نتمكن من اضافة المعاملة . برجاء المحاولة مرة اخري!"
@frappe.whitelist(allow_guest=True)
def delivery_note(**kwargs):
delivery_note =frappe.get_doc(kwargs['data'])
delivery_note.insert()
delivery_note_name = delivery_note.name
frappe.db.commit()
if (delivery_note_name):
message = frappe.response["message"] = {
"success_key": True,
"message": "تم اضافة المعاملة بنجاح!",
"delivery_note": delivery_note_name
}
return message
else:
return "حدث خطأ ولم نتمكن من اضافة المعاملة . برجاء المحاولة مرة اخري!"
@frappe.whitelist(allow_guest=True)
def purchase_receipt(**kwargs):
purchase_receipt =frappe.get_doc(kwargs['data'])
purchase_receipt.insert()
purchase_receipt_name = purchase_receipt.name
frappe.db.commit()
if (purchase_receipt_name):
message = frappe.response["message"] = {
"success_key": True,
"message": "تم اضافة المعاملة بنجاح!",
"purchase_receipt": purchase_receipt_name
}
return message
else:
return "حدث خطأ ولم نتمكن من اضافة المعاملة . برجاء المحاولة مرة اخري!"
@frappe.whitelist(allow_guest=True)
def comment(**kwargs):
comment =frappe.get_doc(kwargs['data'])
comment.insert()
comment_name = comment.name
frappe.db.commit()
if (comment_name):
message = frappe.response["message"] = {
"success_key": True,
"message": "تم اضافة التعليق بنجاح!",
"comment": comment_name
}
return message
else:
return "حدث خطأ ولم نتمكن من اضافة التعليق . برجاء المحاولة مرة اخري!"
@frappe.whitelist(allow_guest=True)
def add_item_list(**kwargs):
start = 0
page_length = 20
try:
if kwargs['search_text']:
items = frappe.db.sql(""" select tabItem.name as name ,
tabItem.item_name as item_name,
tabItem.item_group as item_group,
tabItem.stock_uom as stock_uom,
tabItem.image as image,
tabItem.sales_uom as sales_uom,
ifnull((select max(price_list_rate) from `tabItem Price` where item_code = tabItem.name and price_list = '{price_list}'),0) as price_list_rate,
ifnull((select distinct `tabItem Tax Template Detail`.tax_rate from `tabItem Tax Template Detail` join `tabItem Tax`
where `tabItem Tax Template Detail`.parent = `tabItem Tax`.item_tax_template and `tabItem Tax`.parent = `tabItem`.name),0) as tax_percent
from tabItem where tabItem.disabled = 0 and tabItem.name like '%{item}%' or tabItem.item_name like '%{item}%' LIMIT {start},{page_length}""".format(start=kwargs['start'], page_length=kwargs['page_length'], price_list=kwargs['price_list'],item=kwargs['search_text']), as_dict=1)
result = []
for item_dict in items:
if item_dict.tax_percent > 0 and item_dict.price_list_rate > 0:
net_rate = item_dict.price_list_rate * (1 + (item_dict.tax_percent / 100))
vat_value = net_rate - item_dict.price_list_rate
data = {
'name': item_dict.name,
'item_name': item_dict.item_name,
'item_group': item_dict.item_group,
'stock_uom': item_dict.stock_uom,
'image': item_dict.image,
'sales_uom': item_dict.sales_uom,
'price_list_rate': item_dict.price_list_rate,
'tax_percent': item_dict.tax_percent,
'net_rate': net_rate,
'vat_value': vat_value
}
result.append(data)
else:
data = {
'name': item_dict.name,
'item_name': item_dict.item_name,
'item_group': item_dict.item_group,
'stock_uom': item_dict.stock_uom,
'image': item_dict.image,
'sales_uom': item_dict.sales_uom,
'price_list_rate': item_dict.price_list_rate,
'tax_percent': item_dict.tax_percent,
'net_rate': item_dict.price_list_rate
}
result.append(data)
if items:
return result
else:
return "لا يوجد منتجات !"
except:
items = frappe.db.sql(""" select tabItem.name as name ,
tabItem.item_name as item_name,
tabItem.item_group as item_group,
tabItem.stock_uom as stock_uom,
tabItem.image as image,
tabItem.sales_uom as sales_uom,
ifnull((select max(price_list_rate) from `tabItem Price` where item_code = tabItem.name and price_list = '{price_list}'),0) as price_list_rate,
ifnull((select distinct `tabItem Tax Template Detail`.tax_rate from `tabItem Tax Template Detail` join `tabItem Tax`
where `tabItem Tax Template Detail`.parent = `tabItem Tax`.item_tax_template and `tabItem Tax`.parent = `tabItem`.name),0) as tax_percent
from tabItem where tabItem.disabled = 0 LIMIT {start},{page_length} """.format(start=kwargs['start'], page_length=kwargs['page_length'], price_list=kwargs['price_list']), as_dict=1)
result = []
for item_dict in items:
if item_dict.tax_percent > 0 and item_dict.price_list_rate > 0:
net_rate = item_dict.price_list_rate * (1 + (item_dict.tax_percent / 100))
vat_value = net_rate - item_dict.price_list_rate
data = {
'name': item_dict.name,
'item_name': item_dict.item_name,
'item_group': item_dict.item_group,
'stock_uom': item_dict.stock_uom,
'image': item_dict.image,
'sales_uom': item_dict.sales_uom,
'price_list_rate': item_dict.price_list_rate,
'tax_percent': item_dict.tax_percent,
'net_rate': net_rate,
'vat_value': vat_value
}
result.append(data)
else:
data = {
'name': item_dict.name,
'item_name': item_dict.item_name,
'item_group': item_dict.item_group,
'stock_uom': item_dict.stock_uom,
'image': item_dict.image,
'sales_uom': item_dict.sales_uom,
'price_list_rate': item_dict.price_list_rate,
'tax_percent': item_dict.tax_percent,
'net_rate': item_dict.price_list_rate
}
result.append(data)
if items:
return result
else:
return "لا يوجد منتجات !"
| [
"frappe.db.commit",
"frappe.whitelist",
"frappe.get_doc",
"frappe.new_doc"
] | [((187, 221), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (203, 221), False, 'import frappe\n'), ((659, 693), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (675, 693), False, 'import frappe\n'), ((1187, 1221), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (1203, 1221), False, 'import frappe\n'), ((1699, 1733), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (1715, 1733), False, 'import frappe\n'), ((3972, 4006), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (3988, 4006), False, 'import frappe\n'), ((4505, 4539), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (4521, 4539), False, 'import frappe\n'), ((5049, 5083), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (5065, 5083), False, 'import frappe\n'), ((5593, 5627), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (5609, 5627), False, 'import frappe\n'), ((6059, 6093), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (6075, 6093), False, 'import frappe\n'), ((6627, 6661), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (6643, 6661), False, 'import frappe\n'), ((7156, 7190), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (7172, 7190), False, 'import frappe\n'), ((7700, 7734), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (7716, 7734), False, 'import frappe\n'), ((8268, 8302), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (8284, 8302), False, 'import frappe\n'), ((8762, 8796), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (8778, 8796), False, 'import frappe\n'), ((252, 282), 'frappe.get_doc', 'frappe.get_doc', (["kwargs['data']"], {}), "(kwargs['data'])\n", (266, 282), False, 'import frappe\n'), ((332, 350), 'frappe.db.commit', 'frappe.db.commit', ([], {}), '()\n', (348, 350), False, 'import frappe\n'), ((738, 768), 'frappe.get_doc', 'frappe.get_doc', (["kwargs['data']"], {}), "(kwargs['data'])\n", (752, 768), False, 'import frappe\n'), ((839, 857), 'frappe.db.commit', 'frappe.db.commit', ([], {}), '()\n', (855, 857), False, 'import frappe\n'), ((1262, 1292), 'frappe.get_doc', 'frappe.get_doc', (["kwargs['data']"], {}), "(kwargs['data'])\n", (1276, 1292), False, 'import frappe\n'), ((1357, 1375), 'frappe.db.commit', 'frappe.db.commit', ([], {}), '()\n', (1373, 1375), False, 'import frappe\n'), ((1774, 1800), 'frappe.new_doc', 'frappe.new_doc', (['"""Customer"""'], {}), "('Customer')\n", (1788, 1800), False, 'import frappe\n'), ((2545, 2570), 'frappe.new_doc', 'frappe.new_doc', (['"""Contact"""'], {}), "('Contact')\n", (2559, 2570), False, 'import frappe\n'), ((3007, 3032), 'frappe.new_doc', 'frappe.new_doc', (['"""Address"""'], {}), "('Address')\n", (3021, 3032), False, 'import frappe\n'), ((3633, 3651), 'frappe.db.commit', 'frappe.db.commit', ([], {}), '()\n', (3649, 3651), False, 'import frappe\n'), ((4056, 4086), 'frappe.get_doc', 'frappe.get_doc', (["kwargs['data']"], {}), "(kwargs['data'])\n", (4070, 4086), False, 'import frappe\n'), ((4157, 4175), 'frappe.db.commit', 'frappe.db.commit', ([], {}), '()\n', (4173, 4175), False, 'import frappe\n'), ((4588, 4618), 'frappe.get_doc', 'frappe.get_doc', (["kwargs['data']"], {}), "(kwargs['data'])\n", (4602, 4618), False, 'import frappe\n'), ((4695, 4713), 'frappe.db.commit', 'frappe.db.commit', ([], {}), '()\n', (4711, 4713), False, 'import frappe\n'), ((5132, 5162), 'frappe.get_doc', 'frappe.get_doc', (["kwargs['data']"], {}), "(kwargs['data'])\n", (5146, 5162), False, 'import frappe\n'), ((5239, 5257), 'frappe.db.commit', 'frappe.db.commit', ([], {}), '()\n', (5255, 5257), False, 'import frappe\n'), ((5658, 5688), 'frappe.get_doc', 'frappe.get_doc', (["kwargs['data']"], {}), "(kwargs['data'])\n", (5672, 5688), False, 'import frappe\n'), ((5738, 5756), 'frappe.db.commit', 'frappe.db.commit', ([], {}), '()\n', (5754, 5756), False, 'import frappe\n'), ((6148, 6178), 'frappe.get_doc', 'frappe.get_doc', (["kwargs['data']"], {}), "(kwargs['data'])\n", (6162, 6178), False, 'import frappe\n'), ((6264, 6282), 'frappe.db.commit', 'frappe.db.commit', ([], {}), '()\n', (6280, 6282), False, 'import frappe\n'), ((6706, 6736), 'frappe.get_doc', 'frappe.get_doc', (["kwargs['data']"], {}), "(kwargs['data'])\n", (6720, 6736), False, 'import frappe\n'), ((6807, 6825), 'frappe.db.commit', 'frappe.db.commit', ([], {}), '()\n', (6823, 6825), False, 'import frappe\n'), ((7239, 7269), 'frappe.get_doc', 'frappe.get_doc', (["kwargs['data']"], {}), "(kwargs['data'])\n", (7253, 7269), False, 'import frappe\n'), ((7346, 7364), 'frappe.db.commit', 'frappe.db.commit', ([], {}), '()\n', (7362, 7364), False, 'import frappe\n'), ((7789, 7819), 'frappe.get_doc', 'frappe.get_doc', (["kwargs['data']"], {}), "(kwargs['data'])\n", (7803, 7819), False, 'import frappe\n'), ((7905, 7923), 'frappe.db.commit', 'frappe.db.commit', ([], {}), '()\n', (7921, 7923), False, 'import frappe\n'), ((8339, 8369), 'frappe.get_doc', 'frappe.get_doc', (["kwargs['data']"], {}), "(kwargs['data'])\n", (8353, 8369), False, 'import frappe\n'), ((8428, 8446), 'frappe.db.commit', 'frappe.db.commit', ([], {}), '()\n', (8444, 8446), False, 'import frappe\n')]
import justpy as jp
@jp.SetRoute("/home")
def home():
wp = jp.QuasarPage(tailwind=True)
div = jp.Div(a=wp, classes="bg-gray-200 h-screen")
div1 = jp.Div(a=div, classes="grid grid-cols-3 gap-4 p-4")
in_1 = jp.Input(a=div1, placeholder="Enter first value",
classes="form-input")
in_2 = jp.Input(a=div1, placeholder="Enter second value",
classes="form-input")
d_output = jp.Div(a=div1, text="Result goes here...", classes="text-gray-600")
jp.Div(a=div1, text="Just another div...", classes="text-gray-600")
jp.Div(a=div1, text="Yet another div", classes="text-gray-600")
div2 = jp.Div(a=div, classes="grid grid-cols-2 gap-4")
jp.Button(a=div2, text="Calculate", click = sum_up, in1=in_1, in2=in_2,
d = d_output,
classes="border border-blue-500 m-2 py-1 px-4 rounded "
"text-blue-600 hover:bg-red-500 hover:text-white")
jp.Div(a=div2, text="I am a cool interactive div!", mouseenter=mouse_enter,
mouseleave=mouse_leave,
classes = "hover:bg-red-500")
return wp
@jp.SetRoute("/about")
def about():
wp = jp.QuasarPage(tailwind=True)
def sum_up(widget, msg):
sum = float(widget.in1.value) + float(widget.in2.value)
widget.d.text = sum
def mouse_enter(widget, msg):
widget.text = "A mouse entered the house!"
def mouse_leave(widget, msg):
widget.text = "The mouse left!"
jp.justpy()
| [
"justpy.Button",
"justpy.justpy",
"justpy.Input",
"justpy.SetRoute",
"justpy.Div",
"justpy.QuasarPage"
] | [((22, 42), 'justpy.SetRoute', 'jp.SetRoute', (['"""/home"""'], {}), "('/home')\n", (33, 42), True, 'import justpy as jp\n'), ((1107, 1128), 'justpy.SetRoute', 'jp.SetRoute', (['"""/about"""'], {}), "('/about')\n", (1118, 1128), True, 'import justpy as jp\n'), ((1436, 1447), 'justpy.justpy', 'jp.justpy', ([], {}), '()\n', (1445, 1447), True, 'import justpy as jp\n'), ((64, 92), 'justpy.QuasarPage', 'jp.QuasarPage', ([], {'tailwind': '(True)'}), '(tailwind=True)\n', (77, 92), True, 'import justpy as jp\n'), ((103, 147), 'justpy.Div', 'jp.Div', ([], {'a': 'wp', 'classes': '"""bg-gray-200 h-screen"""'}), "(a=wp, classes='bg-gray-200 h-screen')\n", (109, 147), True, 'import justpy as jp\n'), ((160, 211), 'justpy.Div', 'jp.Div', ([], {'a': 'div', 'classes': '"""grid grid-cols-3 gap-4 p-4"""'}), "(a=div, classes='grid grid-cols-3 gap-4 p-4')\n", (166, 211), True, 'import justpy as jp\n'), ((223, 294), 'justpy.Input', 'jp.Input', ([], {'a': 'div1', 'placeholder': '"""Enter first value"""', 'classes': '"""form-input"""'}), "(a=div1, placeholder='Enter first value', classes='form-input')\n", (231, 294), True, 'import justpy as jp\n'), ((319, 391), 'justpy.Input', 'jp.Input', ([], {'a': 'div1', 'placeholder': '"""Enter second value"""', 'classes': '"""form-input"""'}), "(a=div1, placeholder='Enter second value', classes='form-input')\n", (327, 391), True, 'import justpy as jp\n'), ((420, 487), 'justpy.Div', 'jp.Div', ([], {'a': 'div1', 'text': '"""Result goes here..."""', 'classes': '"""text-gray-600"""'}), "(a=div1, text='Result goes here...', classes='text-gray-600')\n", (426, 487), True, 'import justpy as jp\n'), ((492, 559), 'justpy.Div', 'jp.Div', ([], {'a': 'div1', 'text': '"""Just another div..."""', 'classes': '"""text-gray-600"""'}), "(a=div1, text='Just another div...', classes='text-gray-600')\n", (498, 559), True, 'import justpy as jp\n'), ((564, 627), 'justpy.Div', 'jp.Div', ([], {'a': 'div1', 'text': '"""Yet another div"""', 'classes': '"""text-gray-600"""'}), "(a=div1, text='Yet another div', classes='text-gray-600')\n", (570, 627), True, 'import justpy as jp\n'), ((640, 687), 'justpy.Div', 'jp.Div', ([], {'a': 'div', 'classes': '"""grid grid-cols-2 gap-4"""'}), "(a=div, classes='grid grid-cols-2 gap-4')\n", (646, 687), True, 'import justpy as jp\n'), ((692, 892), 'justpy.Button', 'jp.Button', ([], {'a': 'div2', 'text': '"""Calculate"""', 'click': 'sum_up', 'in1': 'in_1', 'in2': 'in_2', 'd': 'd_output', 'classes': '"""border border-blue-500 m-2 py-1 px-4 rounded text-blue-600 hover:bg-red-500 hover:text-white"""'}), "(a=div2, text='Calculate', click=sum_up, in1=in_1, in2=in_2, d=\n d_output, classes=\n 'border border-blue-500 m-2 py-1 px-4 rounded text-blue-600 hover:bg-red-500 hover:text-white'\n )\n", (701, 892), True, 'import justpy as jp\n'), ((939, 1070), 'justpy.Div', 'jp.Div', ([], {'a': 'div2', 'text': '"""I am a cool interactive div!"""', 'mouseenter': 'mouse_enter', 'mouseleave': 'mouse_leave', 'classes': '"""hover:bg-red-500"""'}), "(a=div2, text='I am a cool interactive div!', mouseenter=mouse_enter,\n mouseleave=mouse_leave, classes='hover:bg-red-500')\n", (945, 1070), True, 'import justpy as jp\n'), ((1151, 1179), 'justpy.QuasarPage', 'jp.QuasarPage', ([], {'tailwind': '(True)'}), '(tailwind=True)\n', (1164, 1179), True, 'import justpy as jp\n')] |
import logging
from spinboard import Spinboard, get_logger
def setup():
# TODO how to enable logging properly?..
logging.basicConfig(level=logging.DEBUG)
def test_query():
setup()
ps = Spinboard()
results = list(ps.iter_by_query('minds are weird', limit=20))
assert len(results) >= 20
def test_tag():
setup()
ps = Spinboard()
results = list(ps.iter_by_tag('database', limit=20))
assert len(results) >= 20
| [
"logging.basicConfig",
"spinboard.Spinboard"
] | [((124, 164), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (143, 164), False, 'import logging\n'), ((206, 217), 'spinboard.Spinboard', 'Spinboard', ([], {}), '()\n', (215, 217), False, 'from spinboard import Spinboard, get_logger\n'), ((353, 364), 'spinboard.Spinboard', 'Spinboard', ([], {}), '()\n', (362, 364), False, 'from spinboard import Spinboard, get_logger\n')] |
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""json rpc dispatcher"""
import json
import re
from urllib.parse import urlsplit, urlparse
from iconcommons.logger import Logger
from jsonrpcserver import config
from jsonrpcserver.aio import AsyncMethods
from jsonrpcserver.response import ExceptionResponse
from sanic import response as sanic_response
from iconrpcserver.dispatcher import GenericJsonRpcServerError
from iconrpcserver.server.rest_property import RestProperty
from iconrpcserver.default_conf.icon_rpcserver_constant import ConfigKey, NodeType, ApiVersion, DISPATCH_V2_TAG
from iconrpcserver.protos import message_code
from iconrpcserver.dispatcher import validate_jsonschema_v2
from iconrpcserver.utils.icon_service import make_request, response_to_json_query, ParamType
from iconrpcserver.utils.json_rpc import redirect_request_to_rs, get_block_v2_by_params
from iconrpcserver.utils.message_queue.stub_collection import StubCollection
config.log_requests = False
config.log_responses = False
methods = AsyncMethods()
class Version2Dispatcher:
@staticmethod
async def dispatch(request):
req = request.json
url = request.url
context = {
"url": url
}
if "node_" in req["method"]:
return sanic_response.text("no support method!")
try:
client_ip = request.remote_addr if request.remote_addr else request.ip
Logger.info(f'rest_server_v2 request with {req}', DISPATCH_V2_TAG)
Logger.info(f"{client_ip} requested {req} on {url}")
validate_jsonschema_v2(request=req)
except GenericJsonRpcServerError as e:
response = ExceptionResponse(e, request_id=req.get('id', 0))
else:
response = await methods.dispatch(req, context=context)
Logger.info(f'rest_server_v2 response with {response}', DISPATCH_V2_TAG)
return sanic_response.json(response, status=response.http_status, dumps=json.dumps)
@staticmethod
def get_dispatch_protocol_from_url(url: str) -> str:
return urlsplit(url).scheme
@staticmethod
@methods.add
async def icx_sendTransaction(**kwargs):
url = kwargs['context']['url']
path = urlparse(url).path
del kwargs['context']
if RestProperty().node_type == NodeType.CitizenNode:
dispatch_protocol = Version2Dispatcher.get_dispatch_protocol_from_url(url)
Logger.debug(f'Dispatch Protocol: {dispatch_protocol}')
redirect_protocol = StubCollection().conf.get(ConfigKey.REDIRECT_PROTOCOL)
Logger.debug(f'Redirect Protocol: {redirect_protocol}')
if redirect_protocol:
dispatch_protocol = redirect_protocol
Logger.debug(f'Protocol: {dispatch_protocol}')
return await redirect_request_to_rs(dispatch_protocol, kwargs, RestProperty().rs_target, path[1:],
ApiVersion.v2.name)
request = make_request("icx_sendTransaction", kwargs, ParamType.send_tx)
channel = StubCollection().conf[ConfigKey.CHANNEL]
icon_stub = StubCollection().icon_score_stubs[channel]
response = await icon_stub.async_task().validate_transaction(request)
# Error Check
response_to_json_query(response)
channel_name = StubCollection().conf[ConfigKey.CHANNEL]
channel_tx_creator_stub = StubCollection().channel_tx_creator_stubs[channel_name]
response_code, tx_hash = await channel_tx_creator_stub.async_task().create_icx_tx(kwargs)
response_data = {'response_code': response_code}
if response_code != message_code.Response.success:
response_data['message'] = message_code.responseCodeMap[response_code][1]
else:
response_data['tx_hash'] = tx_hash
return response_data
@staticmethod
@methods.add
async def icx_getTransactionResult(**kwargs):
channel_name = StubCollection().conf[ConfigKey.CHANNEL]
channel_stub = StubCollection().channel_stubs[channel_name]
verify_result = {}
message = None
tx_hash = kwargs["tx_hash"]
if is_hex(tx_hash):
response_code, result = await channel_stub.async_task().get_invoke_result(tx_hash)
if response_code == message_code.Response.success:
# loopchain success
if result:
try:
# apply tx_result_convert
result_dict = json.loads(result)
fail_status = bool(result_dict.get('failure'))
if fail_status:
error_code = message_code.Response.fail_validate_params
message = "Invalid transaction hash."
else:
error_code = message_code.Response.success
except Exception as e:
error_message = f"your result is not json, result({result}), {e}"
Logger.warning(error_message)
error_code = message_code.Response.fail_validate_params
message = error_message
else:
error_code = message_code.Response.fail_validate_params
message = 'tx_result is empty'
else:
error_code = message_code.Response.fail_validate_params
message = "Invalid transaction hash."
else:
# fail
error_code = message_code.Response.fail_validate_params
message = "response_code is fail"
# parsing response
verify_result['response_code'] = str(error_code)
if error_code == message_code.Response.success:
verify_result['response'] = {'code': error_code}
if message:
verify_result['message'] = message
return verify_result
@staticmethod
@methods.add
async def icx_getBalance(**kwargs):
channel_name = StubCollection().conf[ConfigKey.CHANNEL]
method = 'icx_getBalance'
request = make_request(method, kwargs, ParamType.get_balance)
stub = StubCollection().icon_score_stubs[channel_name]
response = await stub.async_task().query(request)
return response_to_json_query(response, True)
@staticmethod
@methods.add
async def icx_getTotalSupply(**kwargs):
channel_name = StubCollection().conf[ConfigKey.CHANNEL]
method = 'icx_getTotalSupply'
request = make_request(method, kwargs, ParamType.get_total_supply)
stub = StubCollection().icon_score_stubs[channel_name]
response = await stub.async_task().query(request)
return response_to_json_query(response, True)
@staticmethod
@methods.add
async def icx_getLastBlock(**kwargs):
block_hash, response = await get_block_v2_by_params(block_height=-1)
return response
@staticmethod
@methods.add
async def icx_getBlockByHash(**kwargs):
block_hash, response = await get_block_v2_by_params(block_hash=kwargs["hash"])
return response
@staticmethod
@methods.add
async def icx_getBlockByHeight(**kwargs):
try:
block_height = int(kwargs["height"])
except Exception as e:
verify_result = {
'response_code': message_code.Response.fail_wrong_block_height,
'message': f"Invalid block height. error: {e}"
}
return verify_result
block_hash, response = await get_block_v2_by_params(block_height=block_height)
return response
@staticmethod
@methods.add
async def icx_getLastTransaction(**kwargs):
channel_name = StubCollection().conf[ConfigKey.CHANNEL]
return ""
@staticmethod
@methods.add
async def icx_getTransactionByAddress(**kwargs):
channel_name = StubCollection().conf[ConfigKey.CHANNEL]
address = kwargs.get("address", None)
index = kwargs.get("index", None)
if address is None or index is None:
return {
'response_code': message_code.Response.fail_illegal_params,
'message': message_code.get_response_msg(message_code.Response.fail_illegal_params)
}
channel_stub = StubCollection().channel_stubs[channel_name]
tx_list, next_index = await channel_stub.async_task().get_tx_by_address(
address=address,
index=index
)
response = {
'next_index': next_index,
'response': tx_list[:-1],
'response_code': message_code.Response.success
}
return response
def is_hex(s):
return re.fullmatch(r"^(0x)?[0-9a-f]{64}$", s or "") is not None
| [
"sanic.response.json",
"iconrpcserver.utils.message_queue.stub_collection.StubCollection",
"json.loads",
"urllib.parse.urlparse",
"iconcommons.logger.Logger.warning",
"urllib.parse.urlsplit",
"iconcommons.logger.Logger.info",
"iconrpcserver.dispatcher.validate_jsonschema_v2",
"iconrpcserver.utils.icon_service.response_to_json_query",
"jsonrpcserver.aio.AsyncMethods",
"iconrpcserver.utils.icon_service.make_request",
"iconcommons.logger.Logger.debug",
"re.fullmatch",
"iconrpcserver.utils.json_rpc.get_block_v2_by_params",
"iconrpcserver.server.rest_property.RestProperty",
"iconrpcserver.protos.message_code.get_response_msg",
"sanic.response.text"
] | [((1554, 1568), 'jsonrpcserver.aio.AsyncMethods', 'AsyncMethods', ([], {}), '()\n', (1566, 1568), False, 'from jsonrpcserver.aio import AsyncMethods\n'), ((2355, 2427), 'iconcommons.logger.Logger.info', 'Logger.info', (['f"""rest_server_v2 response with {response}"""', 'DISPATCH_V2_TAG'], {}), "(f'rest_server_v2 response with {response}', DISPATCH_V2_TAG)\n", (2366, 2427), False, 'from iconcommons.logger import Logger\n'), ((2443, 2519), 'sanic.response.json', 'sanic_response.json', (['response'], {'status': 'response.http_status', 'dumps': 'json.dumps'}), '(response, status=response.http_status, dumps=json.dumps)\n', (2462, 2519), True, 'from sanic import response as sanic_response\n'), ((3534, 3596), 'iconrpcserver.utils.icon_service.make_request', 'make_request', (['"""icx_sendTransaction"""', 'kwargs', 'ParamType.send_tx'], {}), "('icx_sendTransaction', kwargs, ParamType.send_tx)\n", (3546, 3596), False, 'from iconrpcserver.utils.icon_service import make_request, response_to_json_query, ParamType\n'), ((3827, 3859), 'iconrpcserver.utils.icon_service.response_to_json_query', 'response_to_json_query', (['response'], {}), '(response)\n', (3849, 3859), False, 'from iconrpcserver.utils.icon_service import make_request, response_to_json_query, ParamType\n'), ((6703, 6754), 'iconrpcserver.utils.icon_service.make_request', 'make_request', (['method', 'kwargs', 'ParamType.get_balance'], {}), '(method, kwargs, ParamType.get_balance)\n', (6715, 6754), False, 'from iconrpcserver.utils.icon_service import make_request, response_to_json_query, ParamType\n'), ((6892, 6930), 'iconrpcserver.utils.icon_service.response_to_json_query', 'response_to_json_query', (['response', '(True)'], {}), '(response, True)\n', (6914, 6930), False, 'from iconrpcserver.utils.icon_service import make_request, response_to_json_query, ParamType\n'), ((7132, 7188), 'iconrpcserver.utils.icon_service.make_request', 'make_request', (['method', 'kwargs', 'ParamType.get_total_supply'], {}), '(method, kwargs, ParamType.get_total_supply)\n', (7144, 7188), False, 'from iconrpcserver.utils.icon_service import make_request, response_to_json_query, ParamType\n'), ((7326, 7364), 'iconrpcserver.utils.icon_service.response_to_json_query', 'response_to_json_query', (['response', '(True)'], {}), '(response, True)\n', (7348, 7364), False, 'from iconrpcserver.utils.icon_service import make_request, response_to_json_query, ParamType\n'), ((9340, 9384), 're.fullmatch', 're.fullmatch', (['"""^(0x)?[0-9a-f]{64}$"""', "(s or '')"], {}), "('^(0x)?[0-9a-f]{64}$', s or '')\n", (9352, 9384), False, 'import re\n'), ((1813, 1854), 'sanic.response.text', 'sanic_response.text', (['"""no support method!"""'], {}), "('no support method!')\n", (1832, 1854), True, 'from sanic import response as sanic_response\n'), ((1964, 2030), 'iconcommons.logger.Logger.info', 'Logger.info', (['f"""rest_server_v2 request with {req}"""', 'DISPATCH_V2_TAG'], {}), "(f'rest_server_v2 request with {req}', DISPATCH_V2_TAG)\n", (1975, 2030), False, 'from iconcommons.logger import Logger\n'), ((2043, 2095), 'iconcommons.logger.Logger.info', 'Logger.info', (['f"""{client_ip} requested {req} on {url}"""'], {}), "(f'{client_ip} requested {req} on {url}')\n", (2054, 2095), False, 'from iconcommons.logger import Logger\n'), ((2109, 2144), 'iconrpcserver.dispatcher.validate_jsonschema_v2', 'validate_jsonschema_v2', ([], {'request': 'req'}), '(request=req)\n', (2131, 2144), False, 'from iconrpcserver.dispatcher import validate_jsonschema_v2\n'), ((2611, 2624), 'urllib.parse.urlsplit', 'urlsplit', (['url'], {}), '(url)\n', (2619, 2624), False, 'from urllib.parse import urlsplit, urlparse\n'), ((2767, 2780), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (2775, 2780), False, 'from urllib.parse import urlsplit, urlparse\n'), ((2977, 3032), 'iconcommons.logger.Logger.debug', 'Logger.debug', (['f"""Dispatch Protocol: {dispatch_protocol}"""'], {}), "(f'Dispatch Protocol: {dispatch_protocol}')\n", (2989, 3032), False, 'from iconcommons.logger import Logger\n'), ((3132, 3187), 'iconcommons.logger.Logger.debug', 'Logger.debug', (['f"""Redirect Protocol: {redirect_protocol}"""'], {}), "(f'Redirect Protocol: {redirect_protocol}')\n", (3144, 3187), False, 'from iconcommons.logger import Logger\n'), ((3288, 3334), 'iconcommons.logger.Logger.debug', 'Logger.debug', (['f"""Protocol: {dispatch_protocol}"""'], {}), "(f'Protocol: {dispatch_protocol}')\n", (3300, 3334), False, 'from iconcommons.logger import Logger\n'), ((7480, 7519), 'iconrpcserver.utils.json_rpc.get_block_v2_by_params', 'get_block_v2_by_params', ([], {'block_height': '(-1)'}), '(block_height=-1)\n', (7502, 7519), False, 'from iconrpcserver.utils.json_rpc import redirect_request_to_rs, get_block_v2_by_params\n'), ((7661, 7710), 'iconrpcserver.utils.json_rpc.get_block_v2_by_params', 'get_block_v2_by_params', ([], {'block_hash': "kwargs['hash']"}), "(block_hash=kwargs['hash'])\n", (7683, 7710), False, 'from iconrpcserver.utils.json_rpc import redirect_request_to_rs, get_block_v2_by_params\n'), ((8168, 8217), 'iconrpcserver.utils.json_rpc.get_block_v2_by_params', 'get_block_v2_by_params', ([], {'block_height': 'block_height'}), '(block_height=block_height)\n', (8190, 8217), False, 'from iconrpcserver.utils.json_rpc import redirect_request_to_rs, get_block_v2_by_params\n'), ((2828, 2842), 'iconrpcserver.server.rest_property.RestProperty', 'RestProperty', ([], {}), '()\n', (2840, 2842), False, 'from iconrpcserver.server.rest_property import RestProperty\n'), ((3615, 3631), 'iconrpcserver.utils.message_queue.stub_collection.StubCollection', 'StubCollection', ([], {}), '()\n', (3629, 3631), False, 'from iconrpcserver.utils.message_queue.stub_collection import StubCollection\n'), ((3676, 3692), 'iconrpcserver.utils.message_queue.stub_collection.StubCollection', 'StubCollection', ([], {}), '()\n', (3690, 3692), False, 'from iconrpcserver.utils.message_queue.stub_collection import StubCollection\n'), ((3884, 3900), 'iconrpcserver.utils.message_queue.stub_collection.StubCollection', 'StubCollection', ([], {}), '()\n', (3898, 3900), False, 'from iconrpcserver.utils.message_queue.stub_collection import StubCollection\n'), ((3959, 3975), 'iconrpcserver.utils.message_queue.stub_collection.StubCollection', 'StubCollection', ([], {}), '()\n', (3973, 3975), False, 'from iconrpcserver.utils.message_queue.stub_collection import StubCollection\n'), ((4516, 4532), 'iconrpcserver.utils.message_queue.stub_collection.StubCollection', 'StubCollection', ([], {}), '()\n', (4530, 4532), False, 'from iconrpcserver.utils.message_queue.stub_collection import StubCollection\n'), ((4580, 4596), 'iconrpcserver.utils.message_queue.stub_collection.StubCollection', 'StubCollection', ([], {}), '()\n', (4594, 4596), False, 'from iconrpcserver.utils.message_queue.stub_collection import StubCollection\n'), ((6609, 6625), 'iconrpcserver.utils.message_queue.stub_collection.StubCollection', 'StubCollection', ([], {}), '()\n', (6623, 6625), False, 'from iconrpcserver.utils.message_queue.stub_collection import StubCollection\n'), ((6771, 6787), 'iconrpcserver.utils.message_queue.stub_collection.StubCollection', 'StubCollection', ([], {}), '()\n', (6785, 6787), False, 'from iconrpcserver.utils.message_queue.stub_collection import StubCollection\n'), ((7034, 7050), 'iconrpcserver.utils.message_queue.stub_collection.StubCollection', 'StubCollection', ([], {}), '()\n', (7048, 7050), False, 'from iconrpcserver.utils.message_queue.stub_collection import StubCollection\n'), ((7205, 7221), 'iconrpcserver.utils.message_queue.stub_collection.StubCollection', 'StubCollection', ([], {}), '()\n', (7219, 7221), False, 'from iconrpcserver.utils.message_queue.stub_collection import StubCollection\n'), ((8349, 8365), 'iconrpcserver.utils.message_queue.stub_collection.StubCollection', 'StubCollection', ([], {}), '()\n', (8363, 8365), False, 'from iconrpcserver.utils.message_queue.stub_collection import StubCollection\n'), ((8521, 8537), 'iconrpcserver.utils.message_queue.stub_collection.StubCollection', 'StubCollection', ([], {}), '()\n', (8535, 8537), False, 'from iconrpcserver.utils.message_queue.stub_collection import StubCollection\n'), ((8821, 8893), 'iconrpcserver.protos.message_code.get_response_msg', 'message_code.get_response_msg', (['message_code.Response.fail_illegal_params'], {}), '(message_code.Response.fail_illegal_params)\n', (8850, 8893), False, 'from iconrpcserver.protos import message_code\n'), ((8932, 8948), 'iconrpcserver.utils.message_queue.stub_collection.StubCollection', 'StubCollection', ([], {}), '()\n', (8946, 8948), False, 'from iconrpcserver.utils.message_queue.stub_collection import StubCollection\n'), ((3065, 3081), 'iconrpcserver.utils.message_queue.stub_collection.StubCollection', 'StubCollection', ([], {}), '()\n', (3079, 3081), False, 'from iconrpcserver.utils.message_queue.stub_collection import StubCollection\n'), ((3411, 3425), 'iconrpcserver.server.rest_property.RestProperty', 'RestProperty', ([], {}), '()\n', (3423, 3425), False, 'from iconrpcserver.server.rest_property import RestProperty\n'), ((5075, 5093), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (5085, 5093), False, 'import json\n'), ((5613, 5642), 'iconcommons.logger.Logger.warning', 'Logger.warning', (['error_message'], {}), '(error_message)\n', (5627, 5642), False, 'from iconcommons.logger import Logger\n')]
# coding: utf-8
from django.utils.translation import gettext_lazy as _
from pathlib import Path
SECRET_KEY = ""
SITE_ID = 1
MEDIA_ROOT = ""
STATIC_ROOT = ""
DBNAME = ""
DBUSER = ""
DBPASSWORD = ""
CELERY_BROKER_URL = ""
VIRTUAL_MAILBOX_PASS = ""
S3 = False
OCR = False
APP_USER = ""
APP_GROUP = ""
ASSETS_VER = None
# One of:
# "s3://bucket/path/to/storage"
# "local://path/to/media/root"
STORAGE_ROOT = ''
AUTH_USER_MODEL = "core.User"
WSGI_APPLICATION = 'config.wsgi.application'
ROOT_URLCONF = 'config.urls'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# project root directory
# 1. settings 2. config 3. papermerge-proj - parent 3x
PROJ_ROOT = Path(__file__).parent.parent.parent
INSTALLED_APPS = (
'rest_framework',
'knox',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'papermerge.boss',
'papermerge.core',
'django.contrib.admin',
'django.contrib.contenttypes',
'allauth',
'allauth.account',
'allauth.socialaccount',
'dynamic_preferences',
# comment the following line if you don't want to use user preferences
'dynamic_preferences.users.apps.UserPreferencesConfig',
'polymorphic_tree',
'polymorphic',
'mptt',
# we use postgres full text search
'django.contrib.postgres',
'anymail',
'django_extensions',
)
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [PROJ_ROOT / Path('config') / Path('templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'papermerge.boss.context_processors.static_bundle_url',
'dynamic_preferences.processors.global_preferences',
],
},
},
]
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.TemporaryFileUploadHandler'
]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
LANGUAGES = [
('de', _('German')),
('en', _('English')),
]
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = (
PROJ_ROOT / Path('papermerge'),
)
DATE_FORMAT = '%d/%m/%Y'
DATE_INPUT_FORMATS = ['%d/%m/%Y']
ACCOUNT_SESSION_REMEMBER = False
ACCOUNT_AUTHENTICATION_METHOD = "username_email"
ACCOUNT_EMAIL_REQUIRED = True
LOGIN_REDIRECT_URL = '/dashboard/'
LOGOUT_REDIRECT_URL = '/'
LOGIN_URL = '/login/'
MEDIA_URL = '/documents/'
ALLOWED_HOSTS = [
'*',
]
AUTHENTICATION_BACKENDS = (
'papermerge.core.auth.NodeAuthBackend',
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
ACCOUNT_SESSION_REMEMBER = False
ACCOUNT_AUTHENTICATION_METHOD = "username_email"
ACCOUNT_EMAIL_REQUIRED = True
# Determines the e-mail verification method during signup – choose one of
# "mandatory", "optional", or "none". When set to “mandatory” the user is
# blocked from logging in until the email address is verified. Choose “optional”
# or “none” to allow logins with an unverified e-mail address. In case of
# “optional”, the e-mail verification mail is still sent, whereas in case of
# “none” no e-mail verification mails are sent.
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = '/login/'
ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = '/boss/'
LOGIN_REDIRECT_URL = '/boss/'
LOGOUT_REDIRECT_URL = '/'
LOGIN_URL = '/login/'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
STATIC_URL = '/static/'
# Pixel height used by PDFTOPPM utility. It means basically
# that all generated images will be of following heights only
# min_height, min_height + step, min_height + 2*step, ..., max_height
# When zoomin in and out, uility cycles through these values.
PDFTOPPM_STEP = 100
PDFTOPPM_MIN_HEIGHT = 100
PDFTOPPM_DEFAULT_HEIGHT = 900
PDFTOPPM_MAX_HEIGHT = 1500
# Value must be an integer from 1 to 100
# This values trades off quality agains size/complexity
# 100 - is perfect quality jpeg image, but larger in size
# 1 - poorest quality jpeg image - uses smallest amount of space
PDFTOPPM_JPEG_QUALITY = 90
# = 1 GB of space per tenant
MAX_STORAGE_SIZE = 1 * 1024 * 1024
UPLOAD_FILE_SIZE_MAX = 12 * 1024 * 1024
UPLOAD_FILE_SIZE_MIN = 1
UPLOAD_ALLOWED_MIMETYPES = ['application/pdf']
# Tell celery to use your new serializer:
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TASK_CREATE_MISSING_QUEUES = True
CELERY_TASK_DEFAULT_EXCHANGE = 'papermerge'
CELERY_TASK_DEFAULT_EXCHANGE_TYPE = 'direct'
CELERY_TASK_DEFAULT_ROUTING_KEY = 'papermerge'
CELERY_INCLUDE = 'pmworker.tasks'
CELERY_RESULT_BACKEND = 'rpc://'
CELERY_TASK_RESULT_EXPIRES = 86400
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'knox.auth.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
]
}
REST_KNOX = {
'AUTH_TOKEN_CHARACTER_LENGTH': 32,
'SECURE_HASH_ALGORITHM': 'cryptography.hazmat.primitives.hashes.SHA512',
}
| [
"django.utils.translation.gettext_lazy",
"pathlib.Path"
] | [((2973, 2984), 'django.utils.translation.gettext_lazy', '_', (['"""German"""'], {}), "('German')\n", (2974, 2984), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2998, 3010), 'django.utils.translation.gettext_lazy', '_', (['"""English"""'], {}), "('English')\n", (2999, 3010), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3113, 3131), 'pathlib.Path', 'Path', (['"""papermerge"""'], {}), "('papermerge')\n", (3117, 3131), False, 'from pathlib import Path\n'), ((823, 837), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (827, 837), False, 'from pathlib import Path\n'), ((2170, 2187), 'pathlib.Path', 'Path', (['"""templates"""'], {}), "('templates')\n", (2174, 2187), False, 'from pathlib import Path\n'), ((2153, 2167), 'pathlib.Path', 'Path', (['"""config"""'], {}), "('config')\n", (2157, 2167), False, 'from pathlib import Path\n')] |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
def migrate(apps, schema_editor):
Sex = apps.get_model('species', 'Sex')
CollectionSpecies = apps.get_model('species', 'CollectionSpecies')
for cs in CollectionSpecies.objects.all():
if cs.old_sex:
if cs.old_sex == 'both':
s = 'mixed'
elif cs.old_sex not in ['male', 'female', 'mixed', 'unknown']:
s = 'unknown'
else:
s = cs.old_sex
cs.sex = Sex.objects.get(name=s)
cs.save()
def rollback(apps, schema_editor):
CollectionSpecies = apps.get_model('species', 'CollectionSpecies')
for cs in CollectionSpecies.objects.all():
if cs.sex:
cs.sex = None
cs.old_sex = ''
cs.save()
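    # Overall flow of this migration: rename the old text columns out of the way,
    # add the new ForeignKey columns, copy the data across with
    # RunPython(migrate, rollback), then drop the renamed columns.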
dependencies = [
('species', '0004_sex'),
]
operations = [
migrations.RenameField(
model_name='collectionspecies',
old_name='sex',
new_name='old_sex',
),
migrations.RenameField(
model_name='trapspecies',
old_name='sex',
new_name='old_sex',
),
migrations.AddField(
model_name='collectionspecies',
name='sex',
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='collection_species', to='species.Sex'),
),
migrations.AddField(
model_name='trapspecies',
name='sex',
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='trap_species', to='species.Sex'),
),
migrations.RunPython(migrate, rollback),
migrations.RemoveField(
model_name='collectionspecies',
name='old_sex',
),
migrations.RemoveField(
model_name='trapspecies',
name='old_sex',
),
]
| [
"django.db.migrations.RemoveField",
"django.db.migrations.RunPython",
"django.db.migrations.RenameField",
"django.db.models.ForeignKey"
] | [((1031, 1125), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""collectionspecies"""', 'old_name': '"""sex"""', 'new_name': '"""old_sex"""'}), "(model_name='collectionspecies', old_name='sex',\n new_name='old_sex')\n", (1053, 1125), False, 'from django.db import migrations, models\n'), ((1178, 1267), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""trapspecies"""', 'old_name': '"""sex"""', 'new_name': '"""old_sex"""'}), "(model_name='trapspecies', old_name='sex', new_name=\n 'old_sex')\n", (1200, 1267), False, 'from django.db import migrations, models\n'), ((1881, 1920), 'django.db.migrations.RunPython', 'migrations.RunPython', (['migrate', 'rollback'], {}), '(migrate, rollback)\n', (1901, 1920), False, 'from django.db import migrations, models\n'), ((1930, 2000), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""collectionspecies"""', 'name': '"""old_sex"""'}), "(model_name='collectionspecies', name='old_sex')\n", (1952, 2000), False, 'from django.db import migrations, models\n'), ((2045, 2109), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""trapspecies"""', 'name': '"""old_sex"""'}), "(model_name='trapspecies', name='old_sex')\n", (2067, 2109), False, 'from django.db import migrations, models\n'), ((1426, 1556), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""collection_species"""', 'to': '"""species.Sex"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='collection_species', to='species.Sex')\n", (1443, 1556), False, 'from django.db import migrations, models\n'), ((1707, 1831), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""trap_species"""', 'to': '"""species.Sex"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='trap_species', to='species.Sex')\n", (1724, 1831), False, 'from django.db import migrations, models\n')] |
import unittest
from unittest.mock import Mock, patch
from ecs_scheduler.datacontext import (
Jobs, Job, JobDataMapping, JobNotFound, InvalidJobData, JobAlreadyExists,
JobPersistenceError, JobFieldsRequirePersistence, ImmutableJobFields
)
class JobsTests(unittest.TestCase):
def setUp(self):
self._store = Mock()
self._store.load_all.return_value = {'id': 1}, {'id': 2}
with patch('ecs_scheduler.datacontext.JobCreateSchema') as sp, \
patch('ecs_scheduler.datacontext.RLock') as rp:
self._schema = sp.return_value
self._schema.load.side_effect = lambda d: (d, {})
self._schema.dump.side_effect = \
lambda d: Mock(data={'validated': True, **d})
self._lock = rp.return_value
self._target = Jobs.load(self._store)
@patch('ecs_scheduler.datacontext.persistence')
def test_load_selects_auto_store_if_not_specified(self, persistence):
result = Jobs.load()
self.assertIsNotNone(result)
persistence.resolve.assert_called()
persistence.resolve.return_value.load_all.assert_called_with()
def test_get_all_returns_all(self):
self._store.load_all.assert_called_with()
self.assertCountEqual([1, 2], [j.id for j in self._target.get_all()])
self._lock.__enter__.assert_called()
self._lock.__exit__.assert_called()
def test_len_gets_jobs_length(self):
self.assertEqual(2, self._target.total())
self._lock.__enter__.assert_called()
self._lock.__exit__.assert_called()
def test_get_retrieves_job(self):
result = self._target.get(2)
self.assertIsInstance(result, Job)
self.assertEqual(2, result.id)
self._lock.__enter__.assert_called()
self._lock.__exit__.assert_called()
def test_get_raises_error(self):
with self.assertRaises(JobNotFound) as cm:
self._target.get(3)
self.assertEqual(3, cm.exception.job_id)
self._lock.__enter__.assert_called()
self._lock.__exit__.assert_called()
def test_create_new_job(self):
data = {'id': 4, 'foo': 'bar'}
result = self._target.create(data)
self.assertIsInstance(result, Job)
self.assertEqual(4, result.id)
self.assertIs(result, self._target.get(4))
self.assertEqual(3, self._target.total())
self._store.create.assert_called_with(4, {'validated': True, **data})
self._lock.__enter__.assert_called()
self._lock.__exit__.assert_called()
def test_create_raises_if_invalid_data(self):
self._schema.load.side_effect = lambda d: (d, {'error': 'bad'})
data = {'id': 4, 'foo': 'bar'}
with self.assertRaises(InvalidJobData) as cm:
self._target.create(data)
self.assertEqual(4, cm.exception.job_id)
self.assertEqual({'error': 'bad'}, cm.exception.errors)
self.assertEqual(2, self._target.total())
self._store.create.assert_not_called()
self._lock.__enter__.assert_called()
self._lock.__exit__.assert_called()
def test_create_raises_with_missing_id(self):
self._schema.load.side_effect = lambda d: (d, {'error': 'noid'})
data = {'foo': 'bar'}
with self.assertRaises(InvalidJobData) as cm:
self._target.create(data)
self.assertIsNone(cm.exception.job_id)
self.assertEqual({'error': 'noid'}, cm.exception.errors)
self.assertEqual(2, self._target.total())
self._store.create.assert_not_called()
self._lock.__enter__.assert_called()
self._lock.__exit__.assert_called()
def test_create_raises_if_duplicate_job(self):
data = {'id': 1, 'foo': 'bar'}
with self.assertRaises(JobAlreadyExists) as cm:
self._target.create(data)
self.assertEqual(1, cm.exception.job_id)
self.assertEqual(2, self._target.total())
self._store.create.assert_not_called()
self._lock.__enter__.assert_called()
self._lock.__exit__.assert_called()
def test_create_raises_if_store_fails(self):
self._store.create.side_effect = RuntimeError
data = {'id': 4, 'foo': 'bar'}
with self.assertRaises(JobPersistenceError) as cm:
self._target.create(data)
self.assertEqual(4, cm.exception.job_id)
self.assertEqual(2, self._target.total())
self._store.create.assert_called_with(4, {'validated': True, **data})
self._lock.__enter__.assert_called()
self._lock.__exit__.assert_called()
def test_delete_job(self):
self._target.delete(1)
self.assertEqual(1, self._target.total())
with self.assertRaises(JobNotFound):
self._target.get(1)
self._store.delete.assert_called_with(1)
self._lock.__enter__.assert_called()
self._lock.__exit__.assert_called()
def test_delete_raises_if_no_job(self):
with self.assertRaises(JobNotFound) as cm:
self._target.delete(3)
self.assertEqual(3, cm.exception.job_id)
self.assertEqual(2, self._target.total())
self._store.delete.assert_not_called()
self._lock.__enter__.assert_called()
self._lock.__exit__.assert_called()
def test_delete_raises_if_store_error(self):
self._store.delete.side_effect = RuntimeError
with self.assertRaises(JobPersistenceError) as cm:
self._target.delete(2)
self.assertEqual(2, cm.exception.job_id)
self.assertEqual(2, self._target.total())
self._lock.__enter__.assert_called()
self._lock.__exit__.assert_called()
class JobTests(unittest.TestCase):
def setUp(self):
self._store = Mock()
self._job_data = {'id': 32, 'foo': 'bar'}
with patch('ecs_scheduler.datacontext.JobSchema') as sp, \
patch('ecs_scheduler.datacontext.RLock') as rp:
self._schema = sp.return_value
self._lock = rp.return_value
self._target = Job(self._job_data, self._store)
self._schema.load.side_effect = lambda d: (d, {})
self._schema.dump.side_effect = \
lambda d: Mock(data={'validated': True, **d})
def test_data_returns_all_data(self):
self.assertEqual(self._job_data, self._target.data)
def test_data_is_read_only(self):
with self.assertRaises(TypeError):
self._target.data['baz'] = 'bort'
def test_id_property_returns_id(self):
self.assertEqual(32, self._target.id)
def test_suspended_property_missing(self):
self.assertFalse(self._target.suspended)
def test_suspended_property_field(self):
self._job_data['suspended'] = True
self.assertTrue(self._target.suspended)
def test_parsed_schedule_field(self):
self._job_data['parsedSchedule'] = 'parsed'
self.assertEqual('parsed', self._target.parsed_schedule)
def test_update(self):
new_data = {'a': 1, 'b': 2}
self._target.update(new_data)
self.assertEqual(1, self._target.data['a'])
self.assertEqual(2, self._target.data['b'])
self._store.update.assert_called_with(
32, {'validated': True, **new_data}
)
self._lock.__enter__.assert_called()
self._lock.__exit__.assert_called()
def test_update_changes_existing_fields(self):
new_data = {'a': 1, 'foo': 'baz'}
self._target.update(new_data)
self.assertEqual(1, self._target.data['a'])
self.assertEqual('baz', self._target.data['foo'])
self._store.update.assert_called_with(
32, {'validated': True, **new_data}
)
self._lock.__enter__.assert_called()
self._lock.__exit__.assert_called()
def test_update_does_not_allow_id_override(self):
job_with_real_schema = Job({'id': 44, 'foo': 'bar'}, self._store)
new_data = {'id': 77, 'taskCount': 4}
job_with_real_schema.update(new_data)
self.assertEqual(44, job_with_real_schema.id)
self._store.update.assert_called_with(44, {'taskCount': 4})
def test_update_raises_if_invalid_data(self):
self._schema.load.side_effect = lambda d: (d, {'error': 'bad'})
new_data = {'a': 1, 'b': 2}
with self.assertRaises(InvalidJobData) as cm:
self._target.update(new_data)
self.assertEqual(32, cm.exception.job_id)
self.assertEqual({'error': 'bad'}, cm.exception.errors)
self.assertNotIn('a', self._target.data)
self.assertNotIn('b', self._target.data)
self._store.update.assert_not_called()
self._lock.__enter__.assert_called()
self._lock.__exit__.assert_called()
def test_update_raises_if_store_error(self):
self._store.update.side_effect = RuntimeError
new_data = {'a': 1, 'b': 2}
with self.assertRaises(JobPersistenceError) as cm:
self._target.update(new_data)
self.assertEqual(32, cm.exception.job_id)
self.assertNotIn('a', self._target.data)
self.assertNotIn('b', self._target.data)
self._store.update.assert_called_with(
32, {'validated': True, **new_data}
)
self._lock.__enter__.assert_called()
self._lock.__exit__.assert_called()
def test_annotate(self):
self._schema.load.side_effect = lambda d: ({}, {})
new_data = {'a': 1, 'b': 2}
self._target.annotate(new_data)
self.assertEqual(1, self._target.data['a'])
self.assertEqual(2, self._target.data['b'])
self._store.update.assert_not_called()
self._lock.__enter__.assert_called()
self._lock.__exit__.assert_called()
def test_annotate_does_not_allow_id_override(self):
job_with_real_schema = Job({'id': 44, 'foo': 'bar'}, self._store)
new_data = {'id': 77, 'b': 2}
with self.assertRaises(ImmutableJobFields) as cm:
job_with_real_schema.annotate(new_data)
self.assertEqual(44, cm.exception.job_id)
self.assertCountEqual(['id'], cm.exception.fields)
self.assertNotIn('b', job_with_real_schema.data)
self._store.update.assert_not_called()
def test_annotate_does_not_allow_setting_persistent_fields(self):
job_with_real_schema = Job({'id': 44, 'foo': 'bar'}, self._store)
new_data = {'taskCount': 4, 'schedule': '* *', 'b': 2}
with self.assertRaises(JobFieldsRequirePersistence) as cm:
job_with_real_schema.annotate(new_data)
self.assertEqual(44, cm.exception.job_id)
self.assertCountEqual(
{'taskCount', 'schedule', 'parsedSchedule'}, cm.exception.fields
)
self.assertNotIn('taskCount', job_with_real_schema.data)
self.assertNotIn('schedule', job_with_real_schema.data)
self.assertNotIn('b', job_with_real_schema.data)
self._store.update.assert_not_called()
class JobDataMappingTests(unittest.TestCase):
def setUp(self):
self._data = {'a': 1, 'b': 2}
self._target = JobDataMapping(self._data)
def test_get(self):
self.assertEqual(self._data['a'], self._target['a'])
def test_set_unsupported(self):
with self.assertRaises(TypeError):
self._target['c'] = 3
def test_iterate(self):
expected = list(iter(self._data))
actual = list(iter(self._target))
self.assertCountEqual(expected, actual)
def test_length(self):
self.assertEqual(len(self._data), len(self._target))
| [
"ecs_scheduler.datacontext.Job",
"unittest.mock.Mock",
"ecs_scheduler.datacontext.JobDataMapping",
"ecs_scheduler.datacontext.Jobs.load",
"unittest.mock.patch"
] | [((848, 894), 'unittest.mock.patch', 'patch', (['"""ecs_scheduler.datacontext.persistence"""'], {}), "('ecs_scheduler.datacontext.persistence')\n", (853, 894), False, 'from unittest.mock import Mock, patch\n'), ((329, 335), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (333, 335), False, 'from unittest.mock import Mock, patch\n'), ((986, 997), 'ecs_scheduler.datacontext.Jobs.load', 'Jobs.load', ([], {}), '()\n', (995, 997), False, 'from ecs_scheduler.datacontext import Jobs, Job, JobDataMapping, JobNotFound, InvalidJobData, JobAlreadyExists, JobPersistenceError, JobFieldsRequirePersistence, ImmutableJobFields\n'), ((5763, 5769), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (5767, 5769), False, 'from unittest.mock import Mock, patch\n'), ((7896, 7938), 'ecs_scheduler.datacontext.Job', 'Job', (["{'id': 44, 'foo': 'bar'}", 'self._store'], {}), "({'id': 44, 'foo': 'bar'}, self._store)\n", (7899, 7938), False, 'from ecs_scheduler.datacontext import Jobs, Job, JobDataMapping, JobNotFound, InvalidJobData, JobAlreadyExists, JobPersistenceError, JobFieldsRequirePersistence, ImmutableJobFields\n'), ((9840, 9882), 'ecs_scheduler.datacontext.Job', 'Job', (["{'id': 44, 'foo': 'bar'}", 'self._store'], {}), "({'id': 44, 'foo': 'bar'}, self._store)\n", (9843, 9882), False, 'from ecs_scheduler.datacontext import Jobs, Job, JobDataMapping, JobNotFound, InvalidJobData, JobAlreadyExists, JobPersistenceError, JobFieldsRequirePersistence, ImmutableJobFields\n'), ((10348, 10390), 'ecs_scheduler.datacontext.Job', 'Job', (["{'id': 44, 'foo': 'bar'}", 'self._store'], {}), "({'id': 44, 'foo': 'bar'}, self._store)\n", (10351, 10390), False, 'from ecs_scheduler.datacontext import Jobs, Job, JobDataMapping, JobNotFound, InvalidJobData, JobAlreadyExists, JobPersistenceError, JobFieldsRequirePersistence, ImmutableJobFields\n'), ((11106, 11132), 'ecs_scheduler.datacontext.JobDataMapping', 'JobDataMapping', (['self._data'], {}), '(self._data)\n', (11120, 11132), False, 'from ecs_scheduler.datacontext import Jobs, Job, JobDataMapping, JobNotFound, InvalidJobData, JobAlreadyExists, JobPersistenceError, JobFieldsRequirePersistence, ImmutableJobFields\n'), ((414, 464), 'unittest.mock.patch', 'patch', (['"""ecs_scheduler.datacontext.JobCreateSchema"""'], {}), "('ecs_scheduler.datacontext.JobCreateSchema')\n", (419, 464), False, 'from unittest.mock import Mock, patch\n'), ((490, 530), 'unittest.mock.patch', 'patch', (['"""ecs_scheduler.datacontext.RLock"""'], {}), "('ecs_scheduler.datacontext.RLock')\n", (495, 530), False, 'from unittest.mock import Mock, patch\n'), ((819, 841), 'ecs_scheduler.datacontext.Jobs.load', 'Jobs.load', (['self._store'], {}), '(self._store)\n', (828, 841), False, 'from ecs_scheduler.datacontext import Jobs, Job, JobDataMapping, JobNotFound, InvalidJobData, JobAlreadyExists, JobPersistenceError, JobFieldsRequirePersistence, ImmutableJobFields\n'), ((5833, 5877), 'unittest.mock.patch', 'patch', (['"""ecs_scheduler.datacontext.JobSchema"""'], {}), "('ecs_scheduler.datacontext.JobSchema')\n", (5838, 5877), False, 'from unittest.mock import Mock, patch\n'), ((5903, 5943), 'unittest.mock.patch', 'patch', (['"""ecs_scheduler.datacontext.RLock"""'], {}), "('ecs_scheduler.datacontext.RLock')\n", (5908, 5943), False, 'from unittest.mock import Mock, patch\n'), ((6062, 6094), 'ecs_scheduler.datacontext.Job', 'Job', (['self._job_data', 'self._store'], {}), '(self._job_data, self._store)\n', (6065, 6094), False, 'from ecs_scheduler.datacontext import Jobs, Job, JobDataMapping, JobNotFound, InvalidJobData, 
JobAlreadyExists, JobPersistenceError, JobFieldsRequirePersistence, ImmutableJobFields\n'), ((6217, 6252), 'unittest.mock.Mock', 'Mock', ([], {'data': "{'validated': True, **d}"}), "(data={'validated': True, **d})\n", (6221, 6252), False, 'from unittest.mock import Mock, patch\n'), ((715, 750), 'unittest.mock.Mock', 'Mock', ([], {'data': "{'validated': True, **d}"}), "(data={'validated': True, **d})\n", (719, 750), False, 'from unittest.mock import Mock, patch\n')] |
import glob
import cv2
# Collect the frame files and sort them numerically by their leading index
# (zero-padded so that lexicographic sorting matches numeric order).
ff = glob.glob("./samples/*.jpg")
ff_sorted = sorted([f"./samples/{int(f.split('/')[-1].split('-')[0]):06d}-images.jpg" for f in ff])
img_array = []
size = None
for filename in ff_sorted:
    # Strip the zero padding again to get the file name that actually exists on disk.
    img = cv2.imread(f"./samples/{int(filename.split('/')[-1].split('-')[0])}-images.jpg")
height, width, layers = img.shape
size = (width, height)
img_array.append(img)
out = cv2.VideoWriter("project.mp4", cv2.VideoWriter_fourcc(*"mp4v"), 3, size)
for i in range(len(img_array)):
out.write(img_array[i])
out.release()
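# The result is project.mp4 written at 3 fps, sized to the last frame read;
# all frames are assumed to share that resolution.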
| [
"cv2.VideoWriter_fourcc",
"glob.glob"
] | [((30, 58), 'glob.glob', 'glob.glob', (['"""./samples/*.jpg"""'], {}), "('./samples/*.jpg')\n", (39, 58), False, 'import glob\n'), ((434, 465), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (456, 465), False, 'import cv2\n')] |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import numpy as np
from hummingbird.ml._utils import pandas_installed
if pandas_installed():
from pandas import DataFrame
else:
DataFrame = None
class BatchContainer:
def __init__(self, base_container, remainder_model_container=None):
"""
A wrapper around one or two containers to do batch by batch prediction. The batch size is
fixed when `base_container` is created. Together with `remainder_model_container`, this class
enables prediction on a dataset of size `base_container._batch_size` * k +
`remainder_model_container._batch_size`, where k is any integer. Its `predict` related method
optionally takes `concatenate_outputs` argument, which when set to True causes the outputs to
be returned as a list of individual prediction. This avoids an extra allocation of an output array
and copying of each batch prediction into it.
Args:
base_container: One of subclasses of `SklearnContainer`.
remainder_model_container: An auxiliary container that is used in the last iteration,
if the test input batch size is not devisible by `base_container._batch_size`.
"""
assert base_container._batch_size is not None
self._base_container = base_container
self._batch_size = base_container._batch_size
if remainder_model_container:
assert remainder_model_container._batch_size is not None
self._remainder_model_container = remainder_model_container
self._remainder_size = remainder_model_container._batch_size
else:
# This is remainder_size == 0 case
# We repurpose base_container as a remainder_model_container
self._remainder_model_container = base_container
self._remainder_size = base_container._batch_size
def __getattr__(self, name):
return getattr(self._base_container, name)
def decision_function(self, *inputs, concatenate_outputs=True):
return self._predict_common(
self._base_container.decision_function,
self._remainder_model_container.decision_function,
*inputs,
concatenate_outputs=concatenate_outputs
)
def transform(self, *inputs, concatenate_outputs=True):
return self._predict_common(
self._base_container.transform,
self._remainder_model_container.transform,
*inputs,
concatenate_outputs=concatenate_outputs
)
def score_samples(self, *inputs, concatenate_outputs=True):
return self._predict_common(
self._base_container.score_samples,
self._remainder_model_container.score_samples,
*inputs,
concatenate_outputs=concatenate_outputs
)
def predict(self, *inputs, concatenate_outputs=True):
return self._predict_common(
self._base_container.predict,
self._remainder_model_container.predict,
*inputs,
concatenate_outputs=concatenate_outputs
)
def predict_proba(self, *inputs, concatenate_outputs=True):
return self._predict_common(
self._base_container.predict_proba,
self._remainder_model_container.predict_proba,
*inputs,
concatenate_outputs=concatenate_outputs
)
def _predict_common(self, predict_func, remainder_predict_func, *inputs, concatenate_outputs=True):
if DataFrame is not None and type(inputs[0]) == DataFrame:
# Split the dataframe into column ndarrays.
inputs = inputs[0]
input_names = list(inputs.columns)
splits = [inputs[input_names[idx]] for idx in range(len(input_names))]
inputs = tuple([df.to_numpy().reshape(-1, 1) for df in splits])
def output_proc(predictions):
if concatenate_outputs:
return np.concatenate(predictions)
return predictions
is_tuple = isinstance(inputs, tuple)
if is_tuple:
total_size = inputs[0].shape[0]
else:
total_size = inputs.shape[0]
if total_size == self._batch_size:
# A single batch inference case
return output_proc([predict_func(*inputs)])
iterations = total_size // self._batch_size
iterations += 1 if total_size % self._batch_size > 0 else 0
iterations = max(1, iterations)
predictions = []
for i in range(0, iterations):
start = i * self._batch_size
end = min(start + self._batch_size, total_size)
if is_tuple:
batch = tuple([input[start:end, :] for input in inputs])
else:
batch = inputs[start:end, :]
if i == iterations - 1:
assert (end - start) == self._remainder_size
out = remainder_predict_func(*batch)
else:
out = predict_func(*batch)
predictions.append(out)
return output_proc(predictions)
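# Minimal usage sketch (hypothetical names: `base` and `remainder` stand for two
# already-converted containers whose batch sizes were fixed at conversion time):
#
#   batched = BatchContainer(base, remainder_model_container=remainder)
#   full = batched.predict(X)                              # concatenated ndarray
#   parts = batched.predict(X, concatenate_outputs=False)  # list of per-batch outputs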
| [
"hummingbird.ml._utils.pandas_installed",
"numpy.concatenate"
] | [((386, 404), 'hummingbird.ml._utils.pandas_installed', 'pandas_installed', ([], {}), '()\n', (402, 404), False, 'from hummingbird.ml._utils import pandas_installed\n'), ((4261, 4288), 'numpy.concatenate', 'np.concatenate', (['predictions'], {}), '(predictions)\n', (4275, 4288), True, 'import numpy as np\n')] |
import tkinter as T
# https://www.tcl.tk/man/tcl8.6/TkCmd/cursors.htm
names = """X_cursor
arrow
based_arrow_down
based_arrow_up
boat
bogosity
bottom_left_corner
bottom_right_corner
bottom_side
bottom_tee
box_spiral
center_ptr
circle
clock
coffee_mug
cross
cross_reverse
crosshair
diamond_cross
dot
dotbox
double_arrow
draft_large
draft_small
draped_box
exchange
fleur
gobbler
gumby
hand1
hand2
heart
icon
iron_cross
left_ptr
left_side
left_tee
leftbutton
ll_angle
lr_angle
man
middlebutton
mouse
none
pencil
pirate
plus
question_arrow
right_ptr
right_side
right_tee
rightbutton
rtl_logo
sailboat
sb_down_arrow
sb_h_double_arrow
sb_left_arrow
sb_right_arrow
sb_up_arrow
sb_v_double_arrow
shuttle
sizing
spider
spraycan
star
target
tcross
top_left_arrow
top_left_corner
top_right_corner
top_side
top_tee
trek
ul_angle
umbrella
ur_angle
watch
xterm
--- Windows supported ---
arrow
center_ptr
crosshair
fleur
ibeam
icon
none
sb_h_double_arrow
sb_v_double_arrow
watch
xterm
------ Windows only -----
no
starting
size
size_ne_sw
size_ns
size_nw_se
size_we
uparrow
wait
----- OS X supported ----
arrow
top_left_arrow
left_ptr
cross
crosshair
tcross
ibeam
none
xterm
------- OS X only -------
copyarrow
aliasarrow
contextualmenuarrow
movearrow
text
cross-hair
hand
openhand
closedhand
fist
pointinghand
resize
resizeleft
resizeright
resizeleftright
resizeup
resizedown
resizeupdown
resizebottomleft
resizetopleft
resizebottomright
resizetopright
notallowed
poof
wait
countinguphand
countingdownhand
countingupanddownhand
spinning
help
bucket
cancel
eyedrop
eyedrop-full
zoom-in
zoom-out"""
app = T.Tk()
for i, name in enumerate(names.split("\n")):
label = T.Label(app, text=name)
try:
label["cursor"] = name
except T.TclError:
if name.startswith("-"):
label["foreground"] = "#0000ff"
else:
label["state"] = "disabled"
label.grid(column=i // 25, row=i % 25, sticky="we")
app.mainloop()
| [
"tkinter.Tk",
"tkinter.Label"
] | [((1590, 1596), 'tkinter.Tk', 'T.Tk', ([], {}), '()\n', (1594, 1596), True, 'import tkinter as T\n'), ((1656, 1679), 'tkinter.Label', 'T.Label', (['app'], {'text': 'name'}), '(app, text=name)\n', (1663, 1679), True, 'import tkinter as T\n')] |
import pygame
import game_config as gc
from pygame import display, event, image
from time import sleep
from animal import Animal
def find_index_from_xy(x, y):
row = y // gc.IMAGE_SIZE
col = x // gc.IMAGE_SIZE
index = row * gc.NUM_TILES_SIDE + col
return row, col, index
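# For illustration only (values assumed, not taken from game_config): with IMAGE_SIZE = 128
# and NUM_TILES_SIDE = 4, a click at (x=300, y=140) maps to row 1, col 2, index 6.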
pygame.init()
# used to initialize the pygame methods
display.set_caption('My Game')
# set the game title on the title screen
screen = display.set_mode((gc.SCREEN_SIZE, gc.SCREEN_SIZE))
# defining the window size of the screen
matched = image.load('other_assets/matched.png')
# loading the image assets
#screen.blit(matched,(0,0)) -> not needed for now
#displaying the image in full screen mode
#blit -> draw one image onto another
#display.flip() -> not needed for now
#display.flip() -> This will update the contents of the entire display
running = True
# set a boolean for the while loop
tiles = [Animal(i) for i in range(0, gc.NUM_TILES_TOTAL)]
# we will instantiate the images
current_images_displayed = []
while running: #setting up the game loop
current_events = event.get()
#This will get all the messages and remove them from the queue
for e in current_events:# looping over the events
if e.type == pygame.QUIT:#if user wants to quit exit the game loop
running = False
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_ESCAPE:
running = False
# pressing escape will lead to quiting of the game
if e.type == pygame.MOUSEBUTTONDOWN:
mouse_x, mouse_y = pygame.mouse.get_pos()
            #getting the position of the mouse after clicking on an animal
row, col, index = find_index_from_xy(mouse_x, mouse_y)
# using a function to find the row and column number
if index not in current_images_displayed:
if len(current_images_displayed) > 1:
current_images_displayed = current_images_displayed[1:] + [index]
# we are doing this so that unique images get matched
#appending the index to the list current images
else:
current_images_displayed.append(index)
# Display animals
screen.fill((255, 255, 255))
    # set the screen color to white
total_skipped = 0
for i, tile in enumerate(tiles):
current_image = tile.image if i in current_images_displayed else tile.box
        # if the image's index is in current_images_displayed, show the animal
        # image; otherwise show the grey box
if not tile.skip:
screen.blit(current_image, (tile.col * gc.IMAGE_SIZE + gc.MARGIN, tile.row * gc.IMAGE_SIZE + gc.MARGIN))
#iterating over tiles and displaying them
# enumerate gives the iterator index
else:
total_skipped += 1
display.flip()
#update the screen with display.flip()
# Check for matches
if len(current_images_displayed) == 2:
idx1, idx2 = current_images_displayed
if tiles[idx1].name == tiles[idx2].name:
tiles[idx1].skip = True
tiles[idx2].skip = True
# display matched message
sleep(0.2)
screen.blit(matched, (0, 0))
display.flip()
sleep(0.5)
current_images_displayed = []
if total_skipped == len(tiles):
running = False
print('Goodbye!')
| [
"pygame.init",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.display.flip",
"pygame.mouse.get_pos",
"time.sleep",
"pygame.display.set_caption",
"pygame.image.load",
"animal.Animal"
] | [((302, 315), 'pygame.init', 'pygame.init', ([], {}), '()\n', (313, 315), False, 'import pygame\n'), ((358, 388), 'pygame.display.set_caption', 'display.set_caption', (['"""My Game"""'], {}), "('My Game')\n", (377, 388), False, 'from pygame import display, event, image\n'), ((441, 491), 'pygame.display.set_mode', 'display.set_mode', (['(gc.SCREEN_SIZE, gc.SCREEN_SIZE)'], {}), '((gc.SCREEN_SIZE, gc.SCREEN_SIZE))\n', (457, 491), False, 'from pygame import display, event, image\n'), ((545, 583), 'pygame.image.load', 'image.load', (['"""other_assets/matched.png"""'], {}), "('other_assets/matched.png')\n", (555, 583), False, 'from pygame import display, event, image\n'), ((925, 934), 'animal.Animal', 'Animal', (['i'], {}), '(i)\n', (931, 934), False, 'from animal import Animal\n'), ((1105, 1116), 'pygame.event.get', 'event.get', ([], {}), '()\n', (1114, 1116), False, 'from pygame import display, event, image\n'), ((2911, 2925), 'pygame.display.flip', 'display.flip', ([], {}), '()\n', (2923, 2925), False, 'from pygame import display, event, image\n'), ((1610, 1632), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (1630, 1632), False, 'import pygame\n'), ((3264, 3274), 'time.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (3269, 3274), False, 'from time import sleep\n'), ((3330, 3344), 'pygame.display.flip', 'display.flip', ([], {}), '()\n', (3342, 3344), False, 'from pygame import display, event, image\n'), ((3358, 3368), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (3363, 3368), False, 'from time import sleep\n')] |
"""
Created 9/06/2017
- 15 years after the creation of Slegger...
FishyFish.py
"So Long and Thanks for All the Fish"
"""
# import the goodies
import glob # to get the list of image file names for screenshots, to know where to click
import time
import pyautogui
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
global start, end
# get/set screen resolution
width, height = pyautogui.size()
# get/set the coordinates of the mouse
# <variableName> pyautogui.position()
#pyautogui.moveTo(x = None, y = None, duration = 0.0, .,.etc)
# ^^ MOVES THE MOUSE CURSOR TO A POINT ON THE SCREEN ^^
# MAGICALLY MOVE MOUSE TO 10 x coord and 10 y coord on screen
# pyautogui.moveTo(10, 10)
def testFish():
# move mouse to position that takes 2 seconds?
pyautogui.moveTo(10, 10, duration=2)
pyautogui.moveRel(200,0, duration = 1)
pyautogui.moveRel(0, 200, duration=1)
# pyautogui.doubleClick(339, 38)
    # click into notepad (application is already waiting there)
## print ("mouse pos == " + str(pyautogui.displayMousePosition())) <-- 9/12/2017 removed because function is cmd line only
# X: 2580 Y: 601 (9/12/2017)\
pyautogui.click(210, 210)
# type test message
pyautogui.typewrite('Hello world!"')
    pyautogui.click()
return
def alphaRun():
# start timer?
start = time.time()
print ('hello')
end = time.time()
print (end - start)
# take screen shot and save to C drive
pyautogui.screenshot('c:\\temp\\screenshot_example.png')
    # where (location) the program can find our screenshot image
#pyautogui.locateOnScreen(r'c:\temp\outbound\calc7key.png')
### <--- DO LOOPS AND CHECK UNTIL THAT PICTURE "is found"
print (str(pyautogui.locateOnScreen(r'c:\temp\fish2.png')))
# attempt to right click screen and look for fishing spot
pyautogui.alert('you have 2.5 seconds to move mouse to fishing position')
pyautogui.PAUSE = 2.5
pyautogui.click(button='right')
pyautogui.moveRel(10, 0, duration=1)
# AUTO CLICK FOR 20 NOW BUT SET TO DEPEND ON USER INPUT
pyautogui.click(clicks=80, interval=2, button='left')
# take screenie of "fishing spot"?
#pyautogui.screenshot('c:\\temp\\rightClickFish.png')
return
def importImageSelection():
    app.imageSelect = filedialog.askopenfilename(initialdir = "C:\\", title="import IMAGE select", filetypes=(("png files", "*.png"), ("jpeg files", "*.jpeg"), ("all files", "*.*")))
imageSelectLocation = str(app.imageSelect)
# set variable above to tk.string
imageSelectLocationText.set(imageSelectLocation)
# take from tk.string and set it to search for that screenshot
print (str(pyautogui.locateOnScreen(imageSelectLocationText.get())))
return
def sel():
selection = "Value = " + str(var.get())
label.config(text = selection)
app = tk.Tk()
app.title('FishyFish.exe')
app.geometry('400x400+200+200')
buttonBeginFish = tk.Button(app, text="So Long and Thanks for All the Fish", width=50, command=testFish)
buttonBeginFish.pack()
## INCLUDE BUTTON FOR 'AUTO' AND 'MANUAL' FOR FISHING SPOTS
buttonAlphaRun = tk.Button(app, text="Alpha RUN", width =20, command=alphaRun)
buttonAlphaRun.pack()
labelClickNumberText = tk.StringVar()
labelClickNumberText.set("Number of auto-clicks: ")
label3 = tk.Label(app, textvariable=labelClickNumberText)
label3.place(x=35, y=100)
spinBoxNum = tk.StringVar()
s = tk.Spinbox(app, from_ = 1, to = 250, textvariable=spinBoxNum)
s.place(x=65, y=100)
buttonImageSelectCheck1 = tk.Button(text="select image file to scan as fishing spot ", width=35, command=importImageSelection)
buttonImageSelectCheck1.place(x=35, y=150)
imageSelectLocationText = tk.StringVar()
imageSelectLocationText.set("")
label1 = tk.Label(app, textvariable=imageSelectLocationText)
label1.place(x=35, y=200)
selectionOneText = tk.StringVar()
selectionOneText.set("Direction of land (screen/not compass)")
label2 = tk.Label(app, textvariable=selectionOneText)
label2.place(x=35, y=250)
app.config()
app.mainloop()
| [
"pyautogui.typewrite",
"pyautogui.moveRel",
"pyautogui.moveTo",
"pyautogui.screenshot",
"pyautogui.locateOnScreen",
"tkinter.Button",
"pyautogui.size",
"pyautogui.click",
"tkinter.StringVar",
"tkinter.Tk",
"tkinter.Spinbox",
"tkinter.Label",
"pyautogui.alert",
"time.time",
"tkinter.filedialog.askopenfilename"
] | [((410, 426), 'pyautogui.size', 'pyautogui.size', ([], {}), '()\n', (424, 426), False, 'import pyautogui\n'), ((2851, 2858), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (2856, 2858), True, 'import tkinter as tk\n'), ((2937, 3027), 'tkinter.Button', 'tk.Button', (['app'], {'text': '"""So Long and Thanks for All the Fish"""', 'width': '(50)', 'command': 'testFish'}), "(app, text='So Long and Thanks for All the Fish', width=50,\n command=testFish)\n", (2946, 3027), True, 'import tkinter as tk\n'), ((3125, 3185), 'tkinter.Button', 'tk.Button', (['app'], {'text': '"""Alpha RUN"""', 'width': '(20)', 'command': 'alphaRun'}), "(app, text='Alpha RUN', width=20, command=alphaRun)\n", (3134, 3185), True, 'import tkinter as tk\n'), ((3233, 3247), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (3245, 3247), True, 'import tkinter as tk\n'), ((3310, 3358), 'tkinter.Label', 'tk.Label', (['app'], {'textvariable': 'labelClickNumberText'}), '(app, textvariable=labelClickNumberText)\n', (3318, 3358), True, 'import tkinter as tk\n'), ((3399, 3413), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (3411, 3413), True, 'import tkinter as tk\n'), ((3418, 3475), 'tkinter.Spinbox', 'tk.Spinbox', (['app'], {'from_': '(1)', 'to': '(250)', 'textvariable': 'spinBoxNum'}), '(app, from_=1, to=250, textvariable=spinBoxNum)\n', (3428, 3475), True, 'import tkinter as tk\n'), ((3528, 3632), 'tkinter.Button', 'tk.Button', ([], {'text': '"""select image file to scan as fishing spot """', 'width': '(35)', 'command': 'importImageSelection'}), "(text='select image file to scan as fishing spot ', width=35,\n command=importImageSelection)\n", (3537, 3632), True, 'import tkinter as tk\n'), ((3699, 3713), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (3711, 3713), True, 'import tkinter as tk\n'), ((3755, 3806), 'tkinter.Label', 'tk.Label', (['app'], {'textvariable': 'imageSelectLocationText'}), '(app, textvariable=imageSelectLocationText)\n', (3763, 3806), True, 'import tkinter as tk\n'), ((3853, 3867), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (3865, 3867), True, 'import tkinter as tk\n'), ((3940, 3984), 'tkinter.Label', 'tk.Label', (['app'], {'textvariable': 'selectionOneText'}), '(app, textvariable=selectionOneText)\n', (3948, 3984), True, 'import tkinter as tk\n'), ((786, 822), 'pyautogui.moveTo', 'pyautogui.moveTo', (['(10)', '(10)'], {'duration': '(2)'}), '(10, 10, duration=2)\n', (802, 822), False, 'import pyautogui\n'), ((827, 864), 'pyautogui.moveRel', 'pyautogui.moveRel', (['(200)', '(0)'], {'duration': '(1)'}), '(200, 0, duration=1)\n', (844, 864), False, 'import pyautogui\n'), ((870, 907), 'pyautogui.moveRel', 'pyautogui.moveRel', (['(0)', '(200)'], {'duration': '(1)'}), '(0, 200, duration=1)\n', (887, 907), False, 'import pyautogui\n'), ((1175, 1200), 'pyautogui.click', 'pyautogui.click', (['(210)', '(210)'], {}), '(210, 210)\n', (1190, 1200), False, 'import pyautogui\n'), ((1229, 1265), 'pyautogui.typewrite', 'pyautogui.typewrite', (['"""Hello world!\\""""'], {}), '(\'Hello world!"\')\n', (1248, 1265), False, 'import pyautogui\n'), ((1345, 1356), 'time.time', 'time.time', ([], {}), '()\n', (1354, 1356), False, 'import time\n'), ((1387, 1398), 'time.time', 'time.time', ([], {}), '()\n', (1396, 1398), False, 'import time\n'), ((1470, 1526), 'pyautogui.screenshot', 'pyautogui.screenshot', (['"""c:\\\\temp\\\\screenshot_example.png"""'], {}), "('c:\\\\temp\\\\screenshot_example.png')\n", (1490, 1526), False, 'import pyautogui\n'), ((1849, 1922), 'pyautogui.alert', 'pyautogui.alert', 
(['"""you have 2.5 seconds to move mouse to fishing position"""'], {}), "('you have 2.5 seconds to move mouse to fishing position')\n", (1864, 1922), False, 'import pyautogui\n'), ((1953, 1984), 'pyautogui.click', 'pyautogui.click', ([], {'button': '"""right"""'}), "(button='right')\n", (1968, 1984), False, 'import pyautogui\n'), ((1989, 2025), 'pyautogui.moveRel', 'pyautogui.moveRel', (['(10)', '(0)'], {'duration': '(1)'}), '(10, 0, duration=1)\n', (2006, 2025), False, 'import pyautogui\n'), ((2090, 2143), 'pyautogui.click', 'pyautogui.click', ([], {'clicks': '(80)', 'interval': '(2)', 'button': '"""left"""'}), "(clicks=80, interval=2, button='left')\n", (2105, 2143), False, 'import pyautogui\n'), ((2304, 2470), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'initialdir': '"""C:\\""""', 'title': '"""import IMAGE select"""', 'filetypes': "(('png files', '*.png'), ('jpeg files', '*.jpeg'), ('all files', '*.*'))"}), '(initialdir=\'C:"\', title=\'import IMAGE select\',\n filetypes=((\'png files\', \'*.png\'), (\'jpeg files\', \'*.jpeg\'), (\n \'all files\', \'*.*\')))\n', (2330, 2470), False, 'from tkinter import filedialog\n'), ((1734, 1781), 'pyautogui.locateOnScreen', 'pyautogui.locateOnScreen', (['"""c:\\\\temp\\\\fish2.png"""'], {}), "('c:\\\\temp\\\\fish2.png')\n", (1758, 1781), False, 'import pyautogui\n')] |
import os
import MaxwellConstruction as mx
import numpy as np
import matplotlib.pyplot as plt
import argparse
def run_vdw_case( a_eos, b_eos, sigma, TrList ):
"""
    Run the Maxwell construction cases.
"""
    # Case directory and cleanup
main_dir = os.getcwd()
cases_dir = main_dir + '/a_{:.4f}_b_{:.4f}_sigma_{:.4f}/'.format(a_eos, b_eos, sigma)
os.system('rm -rf ' + cases_dir )
os.system('mkdir -p ' + cases_dir )
    # Equation of state
VdW = mx.EOS('VanDerWaals',a=a_eos,b=b_eos)
    # Run for each reduced temperature
for Tr in TrList:
        # Working directory for this case
os.chdir( cases_dir )
case_name = 'Tr_{:.3f}'.format(Tr)
os.system('cp -r ../Base ' + case_name )
os.chdir( cases_dir + '/' + case_name)
        # Replace properties
step = 0.999
if Tr < 0.6:
step = 0.9999
# Vrmin,Vrmax = mx.coexistencia(VdW, Tr, plotPV=False, step_size=step)
os.system('sed -i \'s/sigmaReplace/{:.5g}/g\' Allrun'.format(args.sigma))
os.system('sed -i \'s/a_vdw_replace/{:.5g}/g\' properties/macroProperties'.format(args.a))
os.system('sed -i \'s/b_vdw_replace/{:.5g}/g\' properties/macroProperties'.format(args.b))
os.system('sed -i \'s/T_vdw_replace/{:.7g}/g\' start/initialFields'.format(Tr*VdW.Tc()))
os.system('sed -i \'s/rho_vdw_replace/{:.7g}/g\' start/initialFields'.format(VdW.rhoc()))
        # Run
print('Tr = {}'.format(Tr))
os.system('./Allclean > log.Allclean')
os.system('./Allpre > log.Allpre')
os.system('./Allrun > log.lbm')
os.chdir(main_dir)
pass
if __name__ == "__main__":
    # Command-line arguments
parser = argparse.ArgumentParser(description='Resolución del problema de construcción de Maxwell para diferentes constantes')
parser.add_argument('-a', help='Constante a de vdW', type=float, default = 0.5)
parser.add_argument('-b', help='Constante b de vdW', type=float, default = 4.0)
parser.add_argument('-sigma', help='Constante sigma', type=float, default = 0.125)
args = parser.parse_args()
    # Run the cases
# Tr = [0.99, 0.9, 0.8, 0.7, 0.6]
Tr = [0.99, 0.98, 0.96, 0.94, 0.92, 0.90, 0.85, 0.80, 0.75, 0.70, 0.65, 0.60, 0.55, 0.50]
# Tr = [0.99, 0.98, 0.96, 0.94, 0.92, 0.90, 0.85, 0.80, 0.75, 0.70]
run_vdw_case( args.a, args.b, args.sigma, Tr )
| [
"argparse.ArgumentParser",
"MaxwellConstruction.EOS",
"os.getcwd",
"os.chdir",
"os.system"
] | [((294, 305), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (303, 305), False, 'import os\n'), ((402, 434), 'os.system', 'os.system', (["('rm -rf ' + cases_dir)"], {}), "('rm -rf ' + cases_dir)\n", (411, 434), False, 'import os\n'), ((441, 475), 'os.system', 'os.system', (["('mkdir -p ' + cases_dir)"], {}), "('mkdir -p ' + cases_dir)\n", (450, 475), False, 'import os\n'), ((516, 555), 'MaxwellConstruction.EOS', 'mx.EOS', (['"""VanDerWaals"""'], {'a': 'a_eos', 'b': 'b_eos'}), "('VanDerWaals', a=a_eos, b=b_eos)\n", (522, 555), True, 'import MaxwellConstruction as mx\n'), ((1761, 1779), 'os.chdir', 'os.chdir', (['main_dir'], {}), '(main_dir)\n', (1769, 1779), False, 'import os\n'), ((1877, 2003), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Resolución del problema de construcción de Maxwell para diferentes constantes"""'}), "(description=\n 'Resolución del problema de construcción de Maxwell para diferentes constantes'\n )\n", (1900, 2003), False, 'import argparse\n'), ((675, 694), 'os.chdir', 'os.chdir', (['cases_dir'], {}), '(cases_dir)\n', (683, 694), False, 'import os\n'), ((765, 804), 'os.system', 'os.system', (["('cp -r ../Base ' + case_name)"], {}), "('cp -r ../Base ' + case_name)\n", (774, 804), False, 'import os\n'), ((815, 852), 'os.chdir', 'os.chdir', (["(cases_dir + '/' + case_name)"], {}), "(cases_dir + '/' + case_name)\n", (823, 852), False, 'import os\n'), ((1618, 1656), 'os.system', 'os.system', (['"""./Allclean > log.Allclean"""'], {}), "('./Allclean > log.Allclean')\n", (1627, 1656), False, 'import os\n'), ((1666, 1700), 'os.system', 'os.system', (['"""./Allpre > log.Allpre"""'], {}), "('./Allpre > log.Allpre')\n", (1675, 1700), False, 'import os\n'), ((1710, 1741), 'os.system', 'os.system', (['"""./Allrun > log.lbm"""'], {}), "('./Allrun > log.lbm')\n", (1719, 1741), False, 'import os\n')] |
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
file_a1 = open("1.txt", 'r')
file_a2 = open("2.txt", 'r')
file_a3 = open("3.txt", 'r')
file_a4 = open("4.txt", 'r')
file_a5 = open("5.txt", 'r')
all_file = [file_a1, file_a2, file_a3, file_a4, file_a5]
tf_dict = {}
idf_dict = {}
idf_dict_val = {}
for f in all_file:
a1 = f.read()
f.close()
a1 = a1.split()
a1 = [x.lower() for x in a1]
stop = set(stopwords.words('english'))
#words = word_tokenize(a1)
wordsFiltered = []
#print ([i for i in a1 if i not in stop])
#print(a1)
for w in a1:
if w not in stop:
wordsFiltered.append(w)
stemmer = PorterStemmer()
tempStemmer = []
for w in wordsFiltered:
tempStemmer.append(stemmer.stem(w))
nama_var = "berita" + str(all_file.index(f) + 1)
tf_dict[nama_var] = {}
for wrd in tempStemmer:
if wrd in tf_dict[nama_var].keys():
tf_dict[nama_var][wrd] += 1
else:
tf_dict[nama_var][wrd] = 1
for key in tf_dict[nama_var].keys():
if key in idf_dict.keys():
idf_dict[key] += 1
else:
idf_dict[key] = 1
for key, val in idf_dict.items():
idf_dict_val[key] = val/5
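# idf_dict_val now maps each term to the fraction of the 5 documents containing it
# (a document-frequency ratio; it is not inverted or log-scaled here).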
print(tf_dict)
print(idf_dict_val)
# Excel: read the tf-idf workbook
import pandas as pd
df_tf = pd.read_excel("hasil tf-idf.xlsx", "TF")
print(df_tf.head())
| [
"nltk.corpus.stopwords.words",
"nltk.stem.PorterStemmer",
"pandas.read_excel"
] | [((1378, 1418), 'pandas.read_excel', 'pd.read_excel', (['"""hasil tf-idf.xlsx"""', '"""TF"""'], {}), "('hasil tf-idf.xlsx', 'TF')\n", (1391, 1418), True, 'import pandas as pd\n'), ((710, 725), 'nltk.stem.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (723, 725), False, 'from nltk.stem import PorterStemmer\n'), ((471, 497), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (486, 497), False, 'from nltk.corpus import stopwords\n')] |
from data.database import (
RepositoryTag,
Repository,
Namespace,
get_epoch_timestamp,
)
def lookup_unrecoverable_tags(repo):
"""
Returns the tags in a repository that are expired and past their time machine recovery period.
"""
expired_clause = get_epoch_timestamp() - Namespace.removed_tag_expiration_s
return (
RepositoryTag.select()
.join(Repository)
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(RepositoryTag.repository == repo)
.where(
~(RepositoryTag.lifetime_end_ts >> None),
RepositoryTag.lifetime_end_ts <= expired_clause,
)
)
| [
"data.database.get_epoch_timestamp",
"data.database.RepositoryTag.select"
] | [((281, 302), 'data.database.get_epoch_timestamp', 'get_epoch_timestamp', ([], {}), '()\n', (300, 302), False, 'from data.database import RepositoryTag, Repository, Namespace, get_epoch_timestamp\n'), ((361, 383), 'data.database.RepositoryTag.select', 'RepositoryTag.select', ([], {}), '()\n', (381, 383), False, 'from data.database import RepositoryTag, Repository, Namespace, get_epoch_timestamp\n')] |
#!/usr/bin/env python
# encoding:utf-8
# @Time : 2019/9/23
# @Author : 茶葫芦
# @Site :
# @File : main_test.py
import numpy as np
from models.knn.Knn import knn_classfier
from sklearn.datasets import load_iris
from pub.model_selection import train_test_split
from pub.processing import meanStdStandardScaler
from models.linearRegression.sample_lnregression import linearRegression
if __name__ == '__main__':
# KNN
# mstd=meanStdStandardScaler()
# iris=load_iris()
# x=iris.data
# y=iris.target
# w,v,a,b=train_test_split(x,y)
# mstd.fit(w)
# w=mstd.transform(w)
# a=mstd.transform(a)
# knn=knn_classfier(5)
# knn.fit(w,v)
# y_pre=knn.score(a,b)
# print(y_pre)
#lnrg
lnrg=linearRegression()
np.random.seed(100)
x = np.random.random(1000)
y = 200 * x + 19 +np.random.normal(size=1000)
x_n,y_n,x_t,y_t=train_test_split(x,y)
model=lnrg.fit(x_n,y_n)
print(model.a_,model.b_)
print(lnrg.score(y_t,lnrg.predict(x_t)))
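    # With y = 200*x + 19 plus unit-variance noise over 1000 points, the fitted
    # a_ and b_ should come out close to 200 and 19 respectively.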
| [
"numpy.random.normal",
"numpy.random.random",
"models.linearRegression.sample_lnregression.linearRegression",
"numpy.random.seed",
"pub.model_selection.train_test_split"
] | [((739, 757), 'models.linearRegression.sample_lnregression.linearRegression', 'linearRegression', ([], {}), '()\n', (755, 757), False, 'from models.linearRegression.sample_lnregression import linearRegression\n'), ((762, 781), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (776, 781), True, 'import numpy as np\n'), ((790, 812), 'numpy.random.random', 'np.random.random', (['(1000)'], {}), '(1000)\n', (806, 812), True, 'import numpy as np\n'), ((883, 905), 'pub.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {}), '(x, y)\n', (899, 905), False, 'from pub.model_selection import train_test_split\n'), ((835, 862), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1000)'}), '(size=1000)\n', (851, 862), True, 'import numpy as np\n')] |
#
# Copyright (c) 2014 - 2019 StorPool.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Low-level helpers for the StorPool JsonObject implementation. """
from __future__ import print_function
import sys
import six
try:
import simplejson as js
except ImportError:
print('simplejson unavailable, fall-back to standard python json',
file=sys.stderr)
import json as js
from . import spcatch
SORT_KEYS = False
INDENT = None
SEPARATORS = (',', ':')
load = js.load # pylint: disable=invalid-name
loads = js.loads # pylint: disable=invalid-name
def dump(obj, filep):
""" Serialize an object with reasonable default settings. """
return js.dump(obj, filep, cls=JsonEncoder, sort_keys=SORT_KEYS,
indent=INDENT, separators=SEPARATORS)
def dumps(obj):
""" Serialize an object to a string with reasonable default settings. """
return js.dumps(obj, cls=JsonEncoder, sort_keys=SORT_KEYS,
indent=INDENT, separators=SEPARATORS)
class JsonEncoder(js.JSONEncoder):
""" Help serialize a JsonObject instance. """
def default(self, o):
""" Invoke a suitable serialization function. """
# pylint: disable=method-hidden
# (this is by design; see json.JSONEncoder.default())
if isinstance(o, JsonObjectImpl):
return o.to_json()
if isinstance(o, set):
return list(o)
return super(JsonEncoder, self).default(o)
class JsonObjectImpl(object):
""" Base class for a serializable value object; see JsonObject. """
def __new__(cls, json=None, **kwargs):
""" Construct a value object as per its __jsonAttrDefs__. """
if isinstance(json, cls):
assert not kwargs, \
"Unsupported update on already contructed object"
return json
j = dict(json) if json is not None else {}
j.update(kwargs)
self = super(JsonObjectImpl, cls).__new__(cls)
object.__setattr__(self, '__jsonAttrs__', {})
exc = None
for attr, attr_def in six.iteritems(self.__jsonAttrDefs__):
data = []
# pylint: disable=cell-var-from-loop
# (the "handle" and "func" arguments are always
# evaluated immediately, never deferred)
exc = spcatch.sp_catch(
data.append,
lambda: attr_def.handleVal(j[attr]) if attr in j
else attr_def.defaultVal(),
exc)
if data:
self.__jsonAttrs__[attr] = data[0]
else:
self.__jsonAttrs__[attr] = None
spcatch.sp_caught(exc, self.__class__.__name__, self)
return self
def __getattr__(self, attr):
if attr not in self.__jsonAttrs__:
error = "'{cls}' has no attribute '{attr}'".format(
cls=self.__class__.__name__, attr=attr)
raise AttributeError(error)
return self.__jsonAttrs__[attr]
def __setattr__(self, attr, value):
if attr not in self.__jsonAttrDefs__:
error = "'{cls}' has no attribute '{attr}'".format(
cls=self.__class__.__name__, attr=attr)
raise AttributeError(error)
self.__jsonAttrs__[attr] = self.__jsonAttrDefs__[attr].handleVal(value)
def to_json(self):
""" Store the member fields into a dictionary. """
return dict(
(attr, getattr(self, attr)) for attr in self.__jsonAttrDefs__)
def __iter__(self):
return six.iteritems(self.to_json())
# obsolete, will be deprecated and removed
toJson = to_json
_asdict = to_json
__str__ = __repr__ = lambda self: str(self.to_json())
| [
"json.dumps",
"json.dump",
"six.iteritems"
] | [((1200, 1299), 'json.dump', 'js.dump', (['obj', 'filep'], {'cls': 'JsonEncoder', 'sort_keys': 'SORT_KEYS', 'indent': 'INDENT', 'separators': 'SEPARATORS'}), '(obj, filep, cls=JsonEncoder, sort_keys=SORT_KEYS, indent=INDENT,\n separators=SEPARATORS)\n', (1207, 1299), True, 'import json as js\n'), ((1422, 1515), 'json.dumps', 'js.dumps', (['obj'], {'cls': 'JsonEncoder', 'sort_keys': 'SORT_KEYS', 'indent': 'INDENT', 'separators': 'SEPARATORS'}), '(obj, cls=JsonEncoder, sort_keys=SORT_KEYS, indent=INDENT,\n separators=SEPARATORS)\n', (1430, 1515), True, 'import json as js\n'), ((2601, 2637), 'six.iteritems', 'six.iteritems', (['self.__jsonAttrDefs__'], {}), '(self.__jsonAttrDefs__)\n', (2614, 2637), False, 'import six\n')] |
import wholesomebot.environment.settings as cfg
import gensim
import re
from collections import defaultdict
from metaphone import doublemetaphone as meta
model = gensim.models.KeyedVectors.load_word2vec_format(f'{cfg.DATA_DIR}/GoogleNews-vectors-negative300.bin',
binary=True)
words = model.index2word
w_rank = {}
w_meta = {}
for i, word in enumerate(words):
w_rank[word] = i
w_meta[word] = meta(word)[0]
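# doublemetaphone() returns a (primary, secondary) tuple; only the primary code is kept.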
def invert_dict(d):
    inv = defaultdict(list)
    for k, v in d.items():
        inv[v].append(k)
    return inv
METAS = invert_dict(w_meta)
WORDS = w_rank
def words(text): return re.findall(r'\w+', text.lower())
def P(word):
return - WORDS.get(word, 0)
def meta_candidates(word):
return METAS.get(meta(word)[0], None)
def correction(word):
return max(candidates(word), key=P)
def candidates(word):
return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])
def known(words):
return set(w for w in words if w in WORDS)
def edits1(word):
letters = 'abcdefghijklmnopqrstuvwxyz'
splits = [(word[:i], word[i:]) for i in range(len(word)+1)]
deletes = [L+R[1:] for L, R in splits if R]
transposes = [L+R[1]+R[0]+R[2:] for L, R in splits if len(R) > 1]
replaces = [L+c+R[1:] for L, R in splits if R for c in letters]
inserts = [L+c+R for L, R in splits for c in letters]
return set(deletes+transposes+replaces+inserts)
def edits2(word):
return (e2 for e1 in edits1(word) for e2 in edits1(e1))
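# Usage sketch (results depend on the loaded word2vec vocabulary, so these are
# illustrative only):
#   correction('helo')          # best known candidate within edit distance 2, ranked by frequency
#   meta_candidates('fonetik')  # words sharing the same primary Double Metaphone code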
| [
"gensim.models.KeyedVectors.load_word2vec_format",
"collections.defaultdict",
"metaphone.doublemetaphone"
] | [((164, 283), 'gensim.models.KeyedVectors.load_word2vec_format', 'gensim.models.KeyedVectors.load_word2vec_format', (['f"""{cfg.DATA_DIR}/GoogleNews-vectors-negative300.bin"""'], {'binary': '(True)'}), "(\n f'{cfg.DATA_DIR}/GoogleNews-vectors-negative300.bin', binary=True)\n", (211, 283), False, 'import gensim\n'), ((505, 522), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (516, 522), False, 'from collections import defaultdict\n'), ((459, 469), 'metaphone.doublemetaphone', 'meta', (['word'], {}), '(word)\n', (463, 469), True, 'from metaphone import doublemetaphone as meta\n'), ((774, 784), 'metaphone.doublemetaphone', 'meta', (['word'], {}), '(word)\n', (778, 784), True, 'from metaphone import doublemetaphone as meta\n')] |
import unittest
import sys
import os
import time
DEBUG_TEST_ARTICLE = False
DEBUG_TIME = True
sys.path.append("C:/GitHub/SchoolNetUtilities")
import wikipedia.xml_to_txt as xml_to_txt
class TestSetCreation(unittest.TestCase):
def test_parse_article(self):
        file_dir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(file_dir, "example_article_wt.txt")
file = open(file_path, "r", encoding="UTF-8")
text = file.read()
file.close()
if (DEBUG_TEST_ARTICLE):
print("=======================================================")
print(text)
print("=======================================================")
# parse the file using the wikitextparser
start_time = time.time()
parsed = xml_to_txt.parseArticle(text)
if (DEBUG_TEST_ARTICLE):
print(parsed)
print("=======================================================")
if (DEBUG_TIME):
elapsed_time = time.time() - start_time
print("Time elapsed: ", elapsed_time)
print("=======================================================")
# parse the file with our modified plain_text function
start_time = time.time()
parsedFast = xml_to_txt.parseArticleFast(text)
if (DEBUG_TEST_ARTICLE):
print(parsedFast)
print("=======================================================")
if (DEBUG_TIME):
elapsed_time = time.time() - start_time
print("Time elapsed: ", elapsed_time)
print("=======================================================")
# self.assertEqual(parsedFast, parsed)
if __name__ == '__main__':
unittest.main() | [
"wikipedia.xml_to_txt.parseArticleFast",
"wikipedia.xml_to_txt.parseArticle",
"os.path.join",
"os.path.realpath",
"time.time",
"unittest.main",
"sys.path.append"
] | [((96, 143), 'sys.path.append', 'sys.path.append', (['"""C:/GitHub/SchoolNetUtilities"""'], {}), "('C:/GitHub/SchoolNetUtilities')\n", (111, 143), False, 'import sys\n'), ((1763, 1778), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1776, 1778), False, 'import unittest\n'), ((349, 397), 'os.path.join', 'os.path.join', (['file_dir', '"""example_article_wt.txt"""'], {}), "(file_dir, 'example_article_wt.txt')\n", (361, 397), False, 'import os\n'), ((784, 795), 'time.time', 'time.time', ([], {}), '()\n', (793, 795), False, 'import time\n'), ((813, 842), 'wikipedia.xml_to_txt.parseArticle', 'xml_to_txt.parseArticle', (['text'], {}), '(text)\n', (836, 842), True, 'import wikipedia.xml_to_txt as xml_to_txt\n'), ((1270, 1281), 'time.time', 'time.time', ([], {}), '()\n', (1279, 1281), False, 'import time\n'), ((1303, 1336), 'wikipedia.xml_to_txt.parseArticleFast', 'xml_to_txt.parseArticleFast', (['text'], {}), '(text)\n', (1330, 1336), True, 'import wikipedia.xml_to_txt as xml_to_txt\n'), ((299, 327), 'os.path.realpath', 'os.path.realpath', (['"""__file__"""'], {}), "('__file__')\n", (315, 327), False, 'import os\n'), ((1033, 1044), 'time.time', 'time.time', ([], {}), '()\n', (1042, 1044), False, 'import time\n'), ((1531, 1542), 'time.time', 'time.time', ([], {}), '()\n', (1540, 1542), False, 'import time\n')] |
from onto.store import SnapshotContainer
def test_set_with_timestamp():
container = SnapshotContainer()
container.set_with_timestamp('k', 'v', (1, 0))
assert container.get('k', (1, 0)) == 'v'
def test_has_previous():
container = SnapshotContainer()
assert not container.has_previous('k')
container.set_with_timestamp('k', 'v', (1, 0))
assert container.has_previous('k')
def test_previous():
container = SnapshotContainer()
container.set_with_timestamp('k', 'v1', (1, 0))
assert container.previous('k') == 'v1'
container.set_with_timestamp('k', 'v2', (2, 0))
assert container.previous('k') == 'v2'
def test_get_with_range():
container = SnapshotContainer()
container.set_with_timestamp('k', 'v1', (1, 0))
container.set_with_timestamp('k', 'v2', (2, 0))
assert list(container.get_with_range(
key='k',
lo_excl=(1, 0),
hi_incl=(2, 0)
)) == ['v2']
assert list(container.get_with_range(
key='k',
hi_incl=(2, 0)
)) == ['v1', 'v2']
| [
"onto.store.SnapshotContainer"
] | [((90, 109), 'onto.store.SnapshotContainer', 'SnapshotContainer', ([], {}), '()\n', (107, 109), False, 'from onto.store import SnapshotContainer\n'), ((249, 268), 'onto.store.SnapshotContainer', 'SnapshotContainer', ([], {}), '()\n', (266, 268), False, 'from onto.store import SnapshotContainer\n'), ((441, 460), 'onto.store.SnapshotContainer', 'SnapshotContainer', ([], {}), '()\n', (458, 460), False, 'from onto.store import SnapshotContainer\n'), ((696, 715), 'onto.store.SnapshotContainer', 'SnapshotContainer', ([], {}), '()\n', (713, 715), False, 'from onto.store import SnapshotContainer\n')] |
""" A ListController Module """
from masonite.controllers import Controller
from masonite.request import Request
from app.List import List
class ListController(Controller):
"""Class Docstring Description
"""
def __init__(self, request: Request):
self.request = request
def show(self):
"""Show a single resource listing
ex. Model.find('id')
Get().route("/show", BlogController)
"""
id = self.request.param("id")
return List.find(id)
pass
def index(self):
"""Show several resource listings
ex. Model.all()
Get().route("/index", TodoController)
"""
return List.all()
pass
def create(self):
item = self.request.input("item")
amount = self.request.input("amount")
list = List.create({"item": item, "amount": amount})
return list
pass
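        # Hedged note: List.create() relies on mass assignment, which normally requires
        # the model to declare __fillable__ = ["item", "amount"] (assumed, not shown here).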
def update(self):
item = self.request.input("item")
amount = self.request.input("amount")
id = self.request.param("id")
List.where("id", id).update({"item": item, "amount": amount})
return List.where("id", id).get()
pass
def destroy(self):
id = self.request.param("id")
list = List.where("id", id).get()
List.where("id", id).delete()
return list
pass | [
"app.List.List.create",
"app.List.List.find",
"app.List.List.all",
"app.List.List.where"
] | [((499, 512), 'app.List.List.find', 'List.find', (['id'], {}), '(id)\n', (508, 512), False, 'from app.List import List\n'), ((693, 703), 'app.List.List.all', 'List.all', ([], {}), '()\n', (701, 703), False, 'from app.List import List\n'), ((845, 890), 'app.List.List.create', 'List.create', (["{'item': item, 'amount': amount}"], {}), "({'item': item, 'amount': amount})\n", (856, 890), False, 'from app.List import List\n'), ((1089, 1109), 'app.List.List.where', 'List.where', (['"""id"""', 'id'], {}), "('id', id)\n", (1099, 1109), False, 'from app.List import List\n'), ((1166, 1186), 'app.List.List.where', 'List.where', (['"""id"""', 'id'], {}), "('id', id)\n", (1176, 1186), False, 'from app.List import List\n'), ((1285, 1305), 'app.List.List.where', 'List.where', (['"""id"""', 'id'], {}), "('id', id)\n", (1295, 1305), False, 'from app.List import List\n'), ((1320, 1340), 'app.List.List.where', 'List.where', (['"""id"""', 'id'], {}), "('id', id)\n", (1330, 1340), False, 'from app.List import List\n')] |
"""
The MIT License (MIT)
Copyright (c) 2020 James
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import annotations
import asyncio
from collections.abc import AsyncGenerator
from contextlib import asynccontextmanager
from datetime import datetime
from typing import TYPE_CHECKING, Any, Optional, Union
from .abc import Channel, M, _EndPointReturnType, _SupportsStr
from .iterators import DMChannelHistoryIterator, GroupChannelHistoryIterator
if TYPE_CHECKING:
from .clan import Clan
from .group import Group
from .image import Image
from .message import ClanMessage, GroupMessage, UserMessage
from .protobufs.steammessages_chat import (
CChatRoomIncomingChatMessageNotification as GroupMessageNotification,
CChatRoomState,
CUserChatRoomState,
)
from .state import ConnectionState
from .trade import TradeOffer
from .user import User
__all__ = (
"DMChannel",
"GroupChannel",
"ClanChannel",
)
class DMChannel(Channel["UserMessage"]):
"""Represents the channel a DM is sent in.
Attributes
----------
participant: :class:`~steam.User`
The recipient of any messages sent.
"""
__slots__ = ("participant",)
def __init__(self, state: ConnectionState, participant: User):
super().__init__(state)
self.participant = participant
self.clan = None
self.group = None
def __repr__(self) -> str:
return f"<DMChannel participant={self.participant!r}>"
def _get_message_endpoint(self) -> _EndPointReturnType:
return self.participant._get_message_endpoint()
def _get_image_endpoint(self) -> _EndPointReturnType:
return self.participant._get_image_endpoint()
if TYPE_CHECKING:
async def send(
self,
content: Optional[_SupportsStr] = None,
*,
trade: Optional[TradeOffer] = None,
image: Optional[Image] = None,
) -> Optional[UserMessage]:
...
@asynccontextmanager
async def typing(self) -> AsyncGenerator[None, None]:
"""Send a typing indicator continuously to the channel while in the context manager.
Note
----
This only works in DMs.
Usage: ::
async with ctx.channel.typing():
# do your expensive operations
"""
async def inner() -> None:
while True:
await asyncio.sleep(10)
await self.trigger_typing()
await self.trigger_typing()
task = self._state.loop.create_task(inner())
yield
task.cancel()
async def trigger_typing(self) -> None:
"""Send a typing indicator to the channel once.
Note
----
This only works in DMs.
"""
await self._state.send_user_typing(self.participant.id64)
def history(
self,
limit: Optional[int] = 100,
before: Optional[datetime] = None,
after: Optional[datetime] = None,
) -> DMChannelHistoryIterator:
return DMChannelHistoryIterator(state=self._state, channel=self, limit=limit, before=before, after=after)
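# Illustrative note (usage assumed, not quoted from the library's documentation): the
# iterator returned by history() is meant to be consumed asynchronously, for example
#     async for message in channel.history(limit=50):
#         ...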
class _GroupChannel(Channel[M]):
__slots__ = ("id", "joined_at", "name")
def __init__(self, state: ConnectionState, channel: Any):
super().__init__(state)
self.id = int(channel.chat_id)
self.joined_at: Optional[datetime]
if hasattr(channel, "chat_name"):
split = channel.chat_name.split(" | ", 1)
self.name = split[1] if len(split) != 1 else split[0]
else:
self.name = None
self.joined_at = (
datetime.utcfromtimestamp(int(channel.time_joined)) if hasattr(channel, "time_joined") else None
)
def __repr__(self) -> str:
attrs = ("id", "group")
resolved = [f"{attr}={getattr(self, attr)!r}" for attr in attrs]
return f"<GroupChannel {' '.join(resolved)}>"
def _get_message_endpoint(self) -> _EndPointReturnType:
return (self.id, self.group.id), self._state.send_group_message
def _get_image_endpoint(self) -> _EndPointReturnType:
return (self.id, self.group.id), self._state.http.send_group_image
def history(
self,
limit: Optional[int] = 100,
before: Optional[datetime] = None,
after: Optional[datetime] = None,
) -> GroupChannelHistoryIterator:
return GroupChannelHistoryIterator(state=self._state, channel=self, limit=limit, before=before, after=after)
class GroupChannel(_GroupChannel["GroupMessage"]):
"""Represents a group channel.
Attributes
----------
id: :class:`int`
The ID of the channel.
name: Optional[:class:`str`]
The name of the channel, this could be the same as the :attr:`~steam.Group.name` if it's the main channel.
group: :class:`~steam.Group`
The group to which messages are sent.
joined_at: Optional[:class:`datetime.datetime`]
The time the client joined the chat.
"""
def __init__(self, state: ConnectionState, group: Group, channel: Union[GroupMessageNotification, CChatRoomState]):
super().__init__(state, channel)
self.group = group
class ClanChannel(_GroupChannel["ClanMessage"]): # they're basically the same thing
"""Represents a group channel.
Attributes
----------
id: :class:`int`
The ID of the channel.
name: Optional[:class:`str`]
The name of the channel, this could be the same
as the :attr:`~steam.Clan.name` if it's the main channel.
clan: :class:`~steam.Clan`
The clan to which messages are sent.
joined_at: Optional[:class:`datetime.datetime`]
The time the client joined the chat.
"""
def __init__(
self, state: ConnectionState, clan: Clan, channel: Union[GroupMessageNotification, CUserChatRoomState]
):
super().__init__(state, channel)
self.clan = clan
def __repr__(self) -> str:
attrs = ("id", "clan")
resolved = [f"{attr}={getattr(self, attr)!r}" for attr in attrs]
return f"<ClanChannel {' '.join(resolved)}>"
def _get_message_endpoint(self) -> _EndPointReturnType:
return (self.id, self.clan.chat_id), self._state.send_group_message
def _get_image_endpoint(self) -> _EndPointReturnType:
return (self.id, self.clan.chat_id), self._state.http.send_group_image
| [
"asyncio.sleep"
] | [((3445, 3462), 'asyncio.sleep', 'asyncio.sleep', (['(10)'], {}), '(10)\n', (3458, 3462), False, 'import asyncio\n')] |
import numpy as np
from UQpy.dimension_reduction.grassmann_manifold.projections.SVDProjection import SVDProjection
from UQpy.dimension_reduction.grassmann_manifold.GrassmannOperations import GrassmannOperations
import sys
def test_log_exp_maps():
sol0 = np.array([[0.61415, 1.03029, 1.02001, 0.57327, 0.79874, 0.73274],
[0.56924, 0.91700, 0.88841, 0.53737, 0.68676, 0.67751],
[0.51514, 0.87898, 0.87779, 0.47850, 0.69085, 0.61525],
[0.63038, 1.10822, 1.12313, 0.58038, 0.89142, 0.75429],
[0.69666, 1.03114, 0.95037, 0.67211, 0.71184, 0.82522],
[0.66595, 1.03789, 0.98690, 0.63420, 0.75416, 0.79110]])
sol1 = np.array([[1.05134, 1.37652, 0.95634, 0.85630, 0.47570, 1.22488],
[0.16370, 0.63105, 0.14533, 0.81030, 0.44559, 0.43358],
[1.23478, 2.10342, 1.04698, 1.68755, 0.92792, 1.73277],
[0.90538, 1.64067, 0.62027, 1.17577, 0.63644, 1.34925],
[0.58210, 0.75795, 0.65519, 0.65712, 0.37251, 0.65740],
[0.99174, 1.59375, 0.63724, 0.89107, 0.47631, 1.36581]])
sol2 = np.array([[1.04142, 0.91670, 1.47962, 1.23350, 0.94111, 0.61858],
[1.00464, 0.65684, 1.35136, 1.11288, 0.96093, 0.42340],
[1.05567, 1.33192, 1.56286, 1.43412, 0.77044, 0.97182],
[0.89812, 0.86136, 1.20204, 1.17892, 0.83788, 0.61160],
[0.46935, 0.39371, 0.63534, 0.57856, 0.47615, 0.26407],
[1.14102, 0.80869, 1.39123, 1.33076, 0.47719, 0.68170]])
sol3 = np.array([[0.60547, 0.11492, 0.78956, 0.13796, 0.76685, 0.41661],
[0.32771, 0.11606, 0.67630, 0.15208, 0.44845, 0.34840],
[0.58959, 0.10156, 0.72623, 0.11859, 0.73671, 0.38714],
[0.36283, 0.07979, 0.52824, 0.09760, 0.46313, 0.27906],
[0.87487, 0.22452, 1.30208, 0.30189, 1.22015, 0.62918],
[0.56006, 0.16879, 1.09635, 0.20431, 0.69439, 0.60317]])
# Creating a list of matrices.
matrices = [sol0, sol1, sol2, sol3]
manifold_projection = SVDProjection(matrices, p="max")
points_tangent = GrassmannOperations.log_map(grassmann_points=manifold_projection.u,
reference_point=manifold_projection.u[0])
assert np.round(points_tangent[0][0][0], 2) == 0.0
assert np.round(points_tangent[1][0][0], 8) == 0.0
assert np.round(points_tangent[2][0][0], 8) == 0.0
assert np.round(points_tangent[3][0][0], 8) == 0.0
manifold_points = GrassmannOperations.exp_map(tangent_points=points_tangent,
reference_point=manifold_projection.u[0])
assert np.round(manifold_points[0].data[0][0], 5) == -0.41808
assert np.round(manifold_points[1].data[0][0], 8) == -0.4180759
assert np.round(manifold_points[2].data[0][0], 8) == -0.4180759
assert np.round(manifold_points[3].data[0][0], 8) == -0.4180759
| [
"UQpy.dimension_reduction.grassmann_manifold.projections.SVDProjection.SVDProjection",
"numpy.array",
"UQpy.dimension_reduction.grassmann_manifold.GrassmannOperations.GrassmannOperations.log_map",
"UQpy.dimension_reduction.grassmann_manifold.GrassmannOperations.GrassmannOperations.exp_map",
"numpy.round"
] | [((260, 617), 'numpy.array', 'np.array', (['[[0.61415, 1.03029, 1.02001, 0.57327, 0.79874, 0.73274], [0.56924, 0.917, \n 0.88841, 0.53737, 0.68676, 0.67751], [0.51514, 0.87898, 0.87779, 0.4785,\n 0.69085, 0.61525], [0.63038, 1.10822, 1.12313, 0.58038, 0.89142, \n 0.75429], [0.69666, 1.03114, 0.95037, 0.67211, 0.71184, 0.82522], [\n 0.66595, 1.03789, 0.9869, 0.6342, 0.75416, 0.7911]]'], {}), '([[0.61415, 1.03029, 1.02001, 0.57327, 0.79874, 0.73274], [0.56924,\n 0.917, 0.88841, 0.53737, 0.68676, 0.67751], [0.51514, 0.87898, 0.87779,\n 0.4785, 0.69085, 0.61525], [0.63038, 1.10822, 1.12313, 0.58038, 0.89142,\n 0.75429], [0.69666, 1.03114, 0.95037, 0.67211, 0.71184, 0.82522], [\n 0.66595, 1.03789, 0.9869, 0.6342, 0.75416, 0.7911]])\n', (268, 617), True, 'import numpy as np\n'), ((724, 1082), 'numpy.array', 'np.array', (['[[1.05134, 1.37652, 0.95634, 0.8563, 0.4757, 1.22488], [0.1637, 0.63105, \n 0.14533, 0.8103, 0.44559, 0.43358], [1.23478, 2.10342, 1.04698, 1.68755,\n 0.92792, 1.73277], [0.90538, 1.64067, 0.62027, 1.17577, 0.63644, \n 1.34925], [0.5821, 0.75795, 0.65519, 0.65712, 0.37251, 0.6574], [\n 0.99174, 1.59375, 0.63724, 0.89107, 0.47631, 1.36581]]'], {}), '([[1.05134, 1.37652, 0.95634, 0.8563, 0.4757, 1.22488], [0.1637, \n 0.63105, 0.14533, 0.8103, 0.44559, 0.43358], [1.23478, 2.10342, 1.04698,\n 1.68755, 0.92792, 1.73277], [0.90538, 1.64067, 0.62027, 1.17577, \n 0.63644, 1.34925], [0.5821, 0.75795, 0.65519, 0.65712, 0.37251, 0.6574],\n [0.99174, 1.59375, 0.63724, 0.89107, 0.47631, 1.36581]])\n', (732, 1082), True, 'import numpy as np\n'), ((1188, 1548), 'numpy.array', 'np.array', (['[[1.04142, 0.9167, 1.47962, 1.2335, 0.94111, 0.61858], [1.00464, 0.65684, \n 1.35136, 1.11288, 0.96093, 0.4234], [1.05567, 1.33192, 1.56286, 1.43412,\n 0.77044, 0.97182], [0.89812, 0.86136, 1.20204, 1.17892, 0.83788, 0.6116\n ], [0.46935, 0.39371, 0.63534, 0.57856, 0.47615, 0.26407], [1.14102, \n 0.80869, 1.39123, 1.33076, 0.47719, 0.6817]]'], {}), '([[1.04142, 0.9167, 1.47962, 1.2335, 0.94111, 0.61858], [1.00464, \n 0.65684, 1.35136, 1.11288, 0.96093, 0.4234], [1.05567, 1.33192, 1.56286,\n 1.43412, 0.77044, 0.97182], [0.89812, 0.86136, 1.20204, 1.17892, \n 0.83788, 0.6116], [0.46935, 0.39371, 0.63534, 0.57856, 0.47615, 0.26407\n ], [1.14102, 0.80869, 1.39123, 1.33076, 0.47719, 0.6817]])\n', (1196, 1548), True, 'import numpy as np\n'), ((1652, 2012), 'numpy.array', 'np.array', (['[[0.60547, 0.11492, 0.78956, 0.13796, 0.76685, 0.41661], [0.32771, 0.11606,\n 0.6763, 0.15208, 0.44845, 0.3484], [0.58959, 0.10156, 0.72623, 0.11859,\n 0.73671, 0.38714], [0.36283, 0.07979, 0.52824, 0.0976, 0.46313, 0.27906\n ], [0.87487, 0.22452, 1.30208, 0.30189, 1.22015, 0.62918], [0.56006, \n 0.16879, 1.09635, 0.20431, 0.69439, 0.60317]]'], {}), '([[0.60547, 0.11492, 0.78956, 0.13796, 0.76685, 0.41661], [0.32771,\n 0.11606, 0.6763, 0.15208, 0.44845, 0.3484], [0.58959, 0.10156, 0.72623,\n 0.11859, 0.73671, 0.38714], [0.36283, 0.07979, 0.52824, 0.0976, 0.46313,\n 0.27906], [0.87487, 0.22452, 1.30208, 0.30189, 1.22015, 0.62918], [\n 0.56006, 0.16879, 1.09635, 0.20431, 0.69439, 0.60317]])\n', (1660, 2012), True, 'import numpy as np\n'), ((2206, 2238), 'UQpy.dimension_reduction.grassmann_manifold.projections.SVDProjection.SVDProjection', 'SVDProjection', (['matrices'], {'p': '"""max"""'}), "(matrices, p='max')\n", (2219, 2238), False, 'from UQpy.dimension_reduction.grassmann_manifold.projections.SVDProjection import SVDProjection\n'), ((2261, 2374), 
'UQpy.dimension_reduction.grassmann_manifold.GrassmannOperations.GrassmannOperations.log_map', 'GrassmannOperations.log_map', ([], {'grassmann_points': 'manifold_projection.u', 'reference_point': 'manifold_projection.u[0]'}), '(grassmann_points=manifold_projection.u,\n reference_point=manifold_projection.u[0])\n', (2288, 2374), False, 'from UQpy.dimension_reduction.grassmann_manifold.GrassmannOperations import GrassmannOperations\n'), ((2664, 2769), 'UQpy.dimension_reduction.grassmann_manifold.GrassmannOperations.GrassmannOperations.exp_map', 'GrassmannOperations.exp_map', ([], {'tangent_points': 'points_tangent', 'reference_point': 'manifold_projection.u[0]'}), '(tangent_points=points_tangent, reference_point=\n manifold_projection.u[0])\n', (2691, 2769), False, 'from UQpy.dimension_reduction.grassmann_manifold.GrassmannOperations import GrassmannOperations\n'), ((2432, 2468), 'numpy.round', 'np.round', (['points_tangent[0][0][0]', '(2)'], {}), '(points_tangent[0][0][0], 2)\n', (2440, 2468), True, 'import numpy as np\n'), ((2487, 2523), 'numpy.round', 'np.round', (['points_tangent[1][0][0]', '(8)'], {}), '(points_tangent[1][0][0], 8)\n', (2495, 2523), True, 'import numpy as np\n'), ((2542, 2578), 'numpy.round', 'np.round', (['points_tangent[2][0][0]', '(8)'], {}), '(points_tangent[2][0][0], 8)\n', (2550, 2578), True, 'import numpy as np\n'), ((2597, 2633), 'numpy.round', 'np.round', (['points_tangent[3][0][0]', '(8)'], {}), '(points_tangent[3][0][0], 8)\n', (2605, 2633), True, 'import numpy as np\n'), ((2827, 2869), 'numpy.round', 'np.round', (['manifold_points[0].data[0][0]', '(5)'], {}), '(manifold_points[0].data[0][0], 5)\n', (2835, 2869), True, 'import numpy as np\n'), ((2893, 2935), 'numpy.round', 'np.round', (['manifold_points[1].data[0][0]', '(8)'], {}), '(manifold_points[1].data[0][0], 8)\n', (2901, 2935), True, 'import numpy as np\n'), ((2961, 3003), 'numpy.round', 'np.round', (['manifold_points[2].data[0][0]', '(8)'], {}), '(manifold_points[2].data[0][0], 8)\n', (2969, 3003), True, 'import numpy as np\n'), ((3029, 3071), 'numpy.round', 'np.round', (['manifold_points[3].data[0][0]', '(8)'], {}), '(manifold_points[3].data[0][0], 8)\n', (3037, 3071), True, 'import numpy as np\n')] |
from flask import render_template, Flask, request
import interpreter
app = Flask(__name__)
interpreter.main()
@app.route('/', methods=['GET', 'POST'])
def root():
context = {
'dados':interpreter.get_dados(),
'registradores':interpreter.get_registradores(),
'cache':interpreter.get_cache(),
'cache_dec':interpreter.get_cache_decimal(),
'cache_size':interpreter.cache_size,
'original_instruction':None
}
if request.method == 'POST':
if 'instruction' in request.form:
command = {
'instruction':request.form['instruction'],
'register_1':request.form['register_1'],
'register_2':request.form['register_2']
}
interpreter.execute(**command)
context['original_instruction'] = command
context['cache_dec'] = interpreter.get_cache_decimal()
elif 'load' in request.form:
interpreter.update_dados([
request.form['D0'],
request.form['D1'],
request.form['D2'],
request.form['D3'],
request.form['D4'],
request.form['D5'],
request.form['D6'],
request.form['D7']
])
context['dados'] = interpreter.get_dados()
elif 'save' in request.form:
interpreter.save_dados()
else:
print('Invalid post request')
return render_template('page.html', **context)
else:
return render_template('page.html', **context)
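# Hedged example of serving this app locally (the original project's entry point is
# not shown here, so this is only an assumption):
# if __name__ == '__main__':
#     app.run(debug=True)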
| [
"flask.render_template",
"interpreter.update_dados",
"flask.Flask",
"interpreter.get_dados",
"interpreter.execute",
"interpreter.save_dados",
"interpreter.get_cache",
"interpreter.get_registradores",
"interpreter.get_cache_decimal",
"interpreter.main"
] | [((76, 91), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (81, 91), False, 'from flask import render_template, Flask, request\n'), ((92, 110), 'interpreter.main', 'interpreter.main', ([], {}), '()\n', (108, 110), False, 'import interpreter\n'), ((197, 220), 'interpreter.get_dados', 'interpreter.get_dados', ([], {}), '()\n', (218, 220), False, 'import interpreter\n'), ((246, 277), 'interpreter.get_registradores', 'interpreter.get_registradores', ([], {}), '()\n', (275, 277), False, 'import interpreter\n'), ((295, 318), 'interpreter.get_cache', 'interpreter.get_cache', ([], {}), '()\n', (316, 318), False, 'import interpreter\n'), ((340, 371), 'interpreter.get_cache_decimal', 'interpreter.get_cache_decimal', ([], {}), '()\n', (369, 371), False, 'import interpreter\n'), ((1491, 1530), 'flask.render_template', 'render_template', (['"""page.html"""'], {}), "('page.html', **context)\n", (1506, 1530), False, 'from flask import render_template, Flask, request\n'), ((1556, 1595), 'flask.render_template', 'render_template', (['"""page.html"""'], {}), "('page.html', **context)\n", (1571, 1595), False, 'from flask import render_template, Flask, request\n'), ((759, 789), 'interpreter.execute', 'interpreter.execute', ([], {}), '(**command)\n', (778, 789), False, 'import interpreter\n'), ((879, 910), 'interpreter.get_cache_decimal', 'interpreter.get_cache_decimal', ([], {}), '()\n', (908, 910), False, 'import interpreter\n'), ((960, 1155), 'interpreter.update_dados', 'interpreter.update_dados', (["[request.form['D0'], request.form['D1'], request.form['D2'], request.form[\n 'D3'], request.form['D4'], request.form['D5'], request.form['D6'],\n request.form['D7']]"], {}), "([request.form['D0'], request.form['D1'], request.\n form['D2'], request.form['D3'], request.form['D4'], request.form['D5'],\n request.form['D6'], request.form['D7']])\n", (984, 1155), False, 'import interpreter\n'), ((1321, 1344), 'interpreter.get_dados', 'interpreter.get_dados', ([], {}), '()\n', (1342, 1344), False, 'import interpreter\n'), ((1394, 1418), 'interpreter.save_dados', 'interpreter.save_dados', ([], {}), '()\n', (1416, 1418), False, 'import interpreter\n')] |
import argparse
import logging
import sys
from db import DBRunner
from utils import ConfigReader
from xml_fetcher import SshXmlFetcher, XmlFetcher
from xml_parser import XmlParser
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logging.getLogger("paramiko").setLevel(logging.WARNING)
logging.getLogger("sqlalchemy").setLevel(logging.WARNING)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('campaign_name', type=str)
parser.add_argument("-l", "--local", dest="local", action="store_true")
args = parser.parse_args()
config = ConfigReader().config
xml_fetcher = XmlFetcher(args.campaign_name, config) if \
args.local else SshXmlFetcher(args.campaign_name, config)
xml_file_path = xml_fetcher.run()
xml_parser = XmlParser(xml_file_path, config, local=args.local)
data_processes = xml_parser.run().get('data_processes')
from pprint import pprint
pprint(DBRunner().check(data_processes))
| [
"logging.basicConfig",
"logging.getLogger",
"argparse.ArgumentParser",
"xml_fetcher.SshXmlFetcher",
"xml_fetcher.XmlFetcher",
"utils.ConfigReader",
"db.DBRunner",
"xml_parser.XmlParser"
] | [((182, 240), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'stream': 'sys.stdout'}), '(level=logging.INFO, stream=sys.stdout)\n', (201, 240), False, 'import logging\n'), ((398, 423), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (421, 423), False, 'import argparse\n'), ((801, 851), 'xml_parser.XmlParser', 'XmlParser', (['xml_file_path', 'config'], {'local': 'args.local'}), '(xml_file_path, config, local=args.local)\n', (810, 851), False, 'from xml_parser import XmlParser\n'), ((242, 271), 'logging.getLogger', 'logging.getLogger', (['"""paramiko"""'], {}), "('paramiko')\n", (259, 271), False, 'import logging\n'), ((298, 329), 'logging.getLogger', 'logging.getLogger', (['"""sqlalchemy"""'], {}), "('sqlalchemy')\n", (315, 329), False, 'import logging\n'), ((596, 610), 'utils.ConfigReader', 'ConfigReader', ([], {}), '()\n', (608, 610), False, 'from utils import ConfigReader\n'), ((636, 674), 'xml_fetcher.XmlFetcher', 'XmlFetcher', (['args.campaign_name', 'config'], {}), '(args.campaign_name, config)\n', (646, 674), False, 'from xml_fetcher import SshXmlFetcher, XmlFetcher\n'), ((704, 745), 'xml_fetcher.SshXmlFetcher', 'SshXmlFetcher', (['args.campaign_name', 'config'], {}), '(args.campaign_name, config)\n', (717, 745), False, 'from xml_fetcher import SshXmlFetcher, XmlFetcher\n'), ((954, 964), 'db.DBRunner', 'DBRunner', ([], {}), '()\n', (962, 964), False, 'from db import DBRunner\n')] |
# -*- coding: utf-8 -*-
import re
import numpy as np
import pandas as pd
def re_split_col(arr):
pattern = re.compile(r'(\d+)')
ret = [pattern.split(string) for string in arr]
data = [[str_list[0], ''.join(str_list[0:3]), ''.join(str_list[3:])] for str_list in ret]
data = np.array(data)
print(data)
data = pd.DataFrame(data=data)
return data
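# Illustrative behaviour (consistent with the sample output in the docstring at the
# bottom of this file): a device name such as 'JCK65RT0A' is split on its digit
# groups into ['JCK', 'JCK65RT', '0A'].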
def main():
data = pd.read_csv('./decice_name.csv')
data[['SouDev', 'SouArea']] = data['DEVNAME'].str.split('-', n=1, expand=True)
data[['DesDev', 'DesArea']] = data['PEERDEVNAME'].str.split('-', n=1, expand=True)
data[['SouPark', 'SouOne', 'SouTwo']] = re_split_col(data['SouDev'].values)
data[['DesPark', 'DesOne', 'DesTwo']] = re_split_col(data['DesDev'].values)
print(data.columns)
print(data.head())
if __name__ == '__main__':
main()
"""
Index(['DEVNAME', 'T.INTTYPE||T.ININUM', 'PEERDEVNAME',
'T.PEERINTTYPE||T.PEERINTNUM'],
dtype='object')
====================
DEVNAME T.INTTYPE||T.ININUM PEERDEVNAME T.PEERINTTYPE||T.PEERINTNUM
0 JCK72WA01-A1 GigabitEthernet0/1 JCK65RT0A-C1 GigabitEthernet3/1
1 JCK72WA01-A1 GigabitEthernet0/2 JCK65RT0B-C1 GigabitEthernet3/1
2 JCK72WA02-A1 GigabitEthernet0/1 JCK65RT0A-C1 GigabitEthernet3/2
3 JCK72WA02-A1 GigabitEthernet0/2 JCK65RT0B-C1 GigabitEthernet3/2
4 JCK31BL11-C1 FastEthernet0 JCK31BL12-C1 FastEthernet0
Index(['DEVNAME', 'T.INTTYPE||T.ININUM', 'PEERDEVNAME',
'T.PEERINTTYPE||T.PEERINTNUM', 'SouDev', 'SouArea', 'DesDev', 'DesArea',
'SouPark', 'SouOne', 'SouTwo', 'DesPark', 'DesOne', 'DesTwo'],
dtype='object')
DEVNAME T.INTTYPE||T.ININUM PEERDEVNAME ... DesPark DesOne DesTwo
0 JCK72WA01-A1 GigabitEthernet0/1 JCK65RT0A-C1 ... JCK JCK65RT 0A
1 JCK72WA01-A1 GigabitEthernet0/2 JCK65RT0B-C1 ... JCK JCK65RT 0B
2 JCK72WA02-A1 GigabitEthernet0/1 JCK65RT0A-C1 ... JCK JCK65RT 0A
3 JCK72WA02-A1 GigabitEthernet0/2 JCK65RT0B-C1 ... JCK JCK65RT 0B
4 JCK31BL11-C1 FastEthernet0 JCK31BL12-C1 ... JCK JCK31BL 12
[5 rows x 14 columns]
"""
| [
"pandas.DataFrame",
"numpy.array",
"pandas.read_csv",
"re.compile"
] | [((114, 134), 're.compile', 're.compile', (['"""(\\\\d+)"""'], {}), "('(\\\\d+)')\n", (124, 134), False, 'import re\n'), ((292, 306), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (300, 306), True, 'import numpy as np\n'), ((334, 357), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data'}), '(data=data)\n', (346, 357), True, 'import pandas as pd\n'), ((399, 431), 'pandas.read_csv', 'pd.read_csv', (['"""./decice_name.csv"""'], {}), "('./decice_name.csv')\n", (410, 431), True, 'import pandas as pd\n')] |
import os
from dogapp import dog
def test_input_url():
response = dog.get_model_output("https://www.thesprucepets.com/thmb/wpN_ZunUaRQAc_WRdAQRxeTbyoc=/4231x2820/filters:fill(auto,1)/adorable-white-pomeranian-puppy-spitz-921029690-5c8be25d46e0fb000172effe.jpg")
assert response["data"]["prediction"][0]["input_url"] == "https://www.thesprucepets.com/thmb/wpN_ZunUaRQAc_WRdAQRxeTbyoc=/4231x2820/filters:fill(auto,1)/adorable-white-pomeranian-puppy-spitz-921029690-5c8be25d46e0fb000172effe.jpg"
def test_class():
response = dog.get_model_output("https://www.thesprucepets.com/thmb/wpN_ZunUaRQAc_WRdAQRxeTbyoc=/4231x2820/filters:fill(auto,1)/adorable-white-pomeranian-puppy-spitz-921029690-5c8be25d46e0fb000172effe.jpg")
with open(os.path.join(os.getcwd(), './dogapp/dog_names.txt')) as file:
dog_names = file.read()
dog_names = dog_names.split('\n')
assert response["data"]["prediction"][0]["class"] in dog_names | [
"dogapp.dog.get_model_output",
"os.getcwd"
] | [((73, 278), 'dogapp.dog.get_model_output', 'dog.get_model_output', (['"""https://www.thesprucepets.com/thmb/wpN_ZunUaRQAc_WRdAQRxeTbyoc=/4231x2820/filters:fill(auto,1)/adorable-white-pomeranian-puppy-spitz-921029690-5c8be25d46e0fb000172effe.jpg"""'], {}), "(\n 'https://www.thesprucepets.com/thmb/wpN_ZunUaRQAc_WRdAQRxeTbyoc=/4231x2820/filters:fill(auto,1)/adorable-white-pomeranian-puppy-spitz-921029690-5c8be25d46e0fb000172effe.jpg'\n )\n", (93, 278), False, 'from dogapp import dog\n'), ((538, 743), 'dogapp.dog.get_model_output', 'dog.get_model_output', (['"""https://www.thesprucepets.com/thmb/wpN_ZunUaRQAc_WRdAQRxeTbyoc=/4231x2820/filters:fill(auto,1)/adorable-white-pomeranian-puppy-spitz-921029690-5c8be25d46e0fb000172effe.jpg"""'], {}), "(\n 'https://www.thesprucepets.com/thmb/wpN_ZunUaRQAc_WRdAQRxeTbyoc=/4231x2820/filters:fill(auto,1)/adorable-white-pomeranian-puppy-spitz-921029690-5c8be25d46e0fb000172effe.jpg'\n )\n", (558, 743), False, 'from dogapp import dog\n'), ((761, 772), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (770, 772), False, 'import os\n')] |
import time
import numpy
import json
# pypot imports
import pypot.robot
import pypot.dynamixel
import math
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
import csv
import pypot.dynamixel
import sys
num1 = 2
num2 = 4
ports = pypot.dynamixel.get_available_ports()
state_file = open("Shoulder_sweep_with_torque.csv", "w")
if not ports:
raise IOError('no port found!')
print('ports found', ports)
print('connecting on the first available port:', ports[0])
dxl_io = pypot.dynamixel.DxlIO(ports[0])
time_start = time.clock()
def setTraj1(id, duration, coeffs):
errorCounter = 0
delay = 0.001
while True:
try:
dxl_io.set_traj1_size({id: 4})
time.sleep(delay)
dxl_io.set_duration1({id: duration})
time.sleep(delay)
dxl_io.set_a0_traj1({id: coeffs[0]})
time.sleep(delay)
dxl_io.set_a1_traj1({id: coeffs[1]})
time.sleep(delay)
dxl_io.set_a2_traj1({id: coeffs[2]})
time.sleep(delay)
dxl_io.set_a3_traj1({id: coeffs[3]})
time.sleep(delay)
break
except:
errorCounter = errorCounter + 1
# print "Nope :/"
break
print("nb errors1 = ", errorCounter)
def setTraj2(id, duration, coeffs):
errorCounter = 0
delay = 0.001
while True:
try:
dxl_io.set_traj2_size({id: 4})
time.sleep(delay)
dxl_io.set_duration2({id: duration})
time.sleep(delay)
dxl_io.set_a0_traj2({id: coeffs[0]})
time.sleep(delay)
dxl_io.set_a1_traj2({id: coeffs[1]})
time.sleep(delay)
dxl_io.set_a2_traj2({id: coeffs[2]})
time.sleep(delay)
dxl_io.set_a3_traj2({id: coeffs[3]})
time.sleep(delay)
break
except:
errorCounter = errorCounter + 1
print("nb errors2 = ", errorCounter)
break
def setTorque1(id, duration, coeffs):
errorCounter = 0
delay = 0.001
while True:
try:
dxl_io.set_torque1_size({id: 4})
time.sleep(delay)
dxl_io.set_duration1({id: duration})
time.sleep(delay)
dxl_io.set_a0_torque1({id: coeffs[0]})
time.sleep(delay)
dxl_io.set_a1_torque1({id: coeffs[1]})
time.sleep(delay)
dxl_io.set_a2_torque1({id: coeffs[2]})
time.sleep(delay)
dxl_io.set_a3_torque1({id: coeffs[3]})
time.sleep(delay)
break
except:
errorCounter = errorCounter + 1
# print "Nope :/"
pass
# print "Nb errors : ", errorCounter
def setTorque2(id, duration, coeffs):
errorCounter = 0
delay = 0.001
while True:
try:
dxl_io.set_torque2_size({id: 3})
time.sleep(delay)
dxl_io.set_duration2({id: duration})
time.sleep(delay)
dxl_io.set_a0_torque2({id: coeffs[0]})
time.sleep(delay)
dxl_io.set_a1_torque2({id: coeffs[1]})
time.sleep(delay)
dxl_io.set_a2_torque2({id: coeffs[2]})
time.sleep(delay)
dxl_io.set_a3_torque2({id: coeffs[3]})
time.sleep(delay)
break
except:
errorCounter = errorCounter + 1
# print "Nope :/"
pass
def func2(t, c, d, e, f):
return c*pow(t, 3) + d*pow(t, 2) + e*t + f
def read_file(inp):
data = []
cp = []
with open(inp, 'r') as file:
reader = csv.reader(file)
for row in reader:
data.append(list(map(float, row)))
res, t = [], []
res1, t1 =[], []
# res2, t2 =[], []
# res3, t3 =[], []
k = 1
for element in data:
if element[0] <= k * 0.5:
t.append(element[1])
t1.append(element[2])
# t2.append(element[3])
# t3.append(element[4])
else:
k = k + 1
res.append(t)
res1.append(t1)
# res2.append(t1)
# res3.append(t1)
t = []
t1 = []
# t2 = []
# t3 = []
t.append(element[1])
t1.append(element[2])
# t2.append(element[3])
# t3.append(element[4])
cp.append(element[0])
return res, res1
# return res ,res1 ,res2 ,res3
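# Descriptive sketch of the fitting step below: each 0.5 s block of samples returned
# by read_file is fit with the cubic func2, so coeff1[i] / coeff2[i] hold (c, d, e, f)
# for block i and the fitted value at time t is c*t**3 + d*t**2 + e*t + f.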
# main Program
# file_name = input('Enter csv file for motor: ')
angle,torque = read_file('Shoulder.csv')
# angle1, angle2, angle3, angle4 = read_file(file_name)
coeff1 = {}
pcov1 = {}
count1 = 0
for value in angle:
coeff1[count1], pcov1[count1] = curve_fit(func2, np.linspace(0,0.5,len(value)),value)
# print(coeff1[count1],count1)
count1 = count1 + 1
coeff2 = {}
pcov2 = {}
count2 = 0
for value in torque:
coeff2[count2], pcov2[count2] = curve_fit(func2, np.linspace(0,0.5,len(value)),value)
# print(coeff2[count2],count2)
count2 = count2 + 1
# ((angle+180)*4096)/360
print ("Test with PID only:")
dxl_io.set_mode_dynaban({num1:0})
time.sleep(0.1)
dxl_io.enable_torque({num1:1})
time.sleep(0.1)
# dxl_io.enable_torque({1:1})
# time.sleep(0.1)
# dxl_io.set_mode_dynaban({num2:0})
# time.sleep(0.1)
# dxl_io.enable_torque({num2:1})
# time.sleep(0.1)
# dxl_io.set_goal_position({num1:-75})
dxl_io.set_goal_position({num1:0})
time.sleep(1)
dxl_io.set_pid_gain({num1:[1,0,0]})
time.sleep(0.1)
# dxl_io.set_pid_gain({num2:[2,0,0]})
# time.sleep(0.1)
# print ("Setting traj1 :")
# dxl_io.set_max_torque({num1:100})
# print(dxl_io.get_goal_position([num]))
for i in range(0,len(coeff1)):
if i == 0:
setTraj1(num1,5000, [coeff1[i][3],coeff1[i][2],coeff1[i][1],coeff1[i][0]])
setTorque1(num1,5000, [coeff2[i][3],coeff2[i][2],coeff2[i][1],coeff2[i][0]])
dxl_io.set_mode_dynaban({num1:3})
else:
setTraj2(num1,5000, [coeff1[i][3],coeff1[i][2],coeff1[i][1],coeff1[i][0]])
setTorque2(num1,5000, [coeff2[i][3],coeff2[i][2],coeff2[i][1],coeff2[i][0]])
dxl_io.set_copy_next_buffer({num1:1})
# time.sleep(0.5)
time_current = time.time()
while (time.time()-time_current) <= 0.5:
# print((time.time()-time_current))
str_state = [str(dxl_io.get_present_position([num1])[0]),str(dxl_io.get_outputTorque([num1])[0])]
state_file.write(",".join(str_state) + "\n")
time.sleep(0.025)
time_elapsed = (time.clock() - time_start)
print(time_elapsed)
| [
"time.sleep",
"csv.reader",
"time.clock",
"time.time"
] | [((561, 573), 'time.clock', 'time.clock', ([], {}), '()\n', (571, 573), False, 'import time\n'), ((5205, 5220), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5215, 5220), False, 'import time\n'), ((5252, 5267), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5262, 5267), False, 'import time\n'), ((5496, 5509), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5506, 5509), False, 'import time\n'), ((5546, 5561), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5556, 5561), False, 'import time\n'), ((6600, 6612), 'time.clock', 'time.clock', ([], {}), '()\n', (6610, 6612), False, 'import time\n'), ((3691, 3707), 'csv.reader', 'csv.reader', (['file'], {}), '(file)\n', (3701, 3707), False, 'import csv\n'), ((6277, 6288), 'time.time', 'time.time', ([], {}), '()\n', (6286, 6288), False, 'import time\n'), ((734, 751), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (744, 751), False, 'import time\n'), ((813, 830), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (823, 830), False, 'import time\n'), ((892, 909), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (902, 909), False, 'import time\n'), ((971, 988), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (981, 988), False, 'import time\n'), ((1050, 1067), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (1060, 1067), False, 'import time\n'), ((1129, 1146), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (1139, 1146), False, 'import time\n'), ((1487, 1504), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (1497, 1504), False, 'import time\n'), ((1566, 1583), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (1576, 1583), False, 'import time\n'), ((1645, 1662), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (1655, 1662), False, 'import time\n'), ((1724, 1741), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (1734, 1741), False, 'import time\n'), ((1803, 1820), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (1813, 1820), False, 'import time\n'), ((1882, 1899), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (1892, 1899), False, 'import time\n'), ((2213, 2230), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (2223, 2230), False, 'import time\n'), ((2292, 2309), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (2302, 2309), False, 'import time\n'), ((2373, 2390), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (2383, 2390), False, 'import time\n'), ((2454, 2471), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (2464, 2471), False, 'import time\n'), ((2535, 2552), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (2545, 2552), False, 'import time\n'), ((2616, 2633), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (2626, 2633), False, 'import time\n'), ((2968, 2985), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (2978, 2985), False, 'import time\n'), ((3047, 3064), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (3057, 3064), False, 'import time\n'), ((3128, 3145), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (3138, 3145), False, 'import time\n'), ((3209, 3226), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (3219, 3226), False, 'import time\n'), ((3290, 3307), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (3300, 3307), False, 'import time\n'), ((3371, 3388), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (3381, 3388), False, 'import time\n'), ((6565, 6582), 'time.sleep', 
'time.sleep', (['(0.025)'], {}), '(0.025)\n', (6575, 6582), False, 'import time\n'), ((6304, 6315), 'time.time', 'time.time', ([], {}), '()\n', (6313, 6315), False, 'import time\n')] |
"""
Module for calculations with Rb-Sr and Sm-Nd isotopic systems
"""
from numpy import exp
import pandas as pd
def epsNd(Ndrat,Smrat=0,age=0):
"""
Calculate epsilon Nd using CHUR values from Bouvier et al., 2008
Parameters:
Ndrat: Measured 143Nd/144Nd ratio
Smrat: Measured 147Sm/144Nd ratio
age: Age of sample in Ma for initial calculation
Returns:
eNd: epsilon Nd for present-day 143Nd/144Nd
Ndi: initial 143Nd/144Nd ratio
eNdi: epsilon Nd for initial 143Nd/144Nd ratio
"""
lambdaSm = 6.54e-12 # Sm-147 decay constant
years = 10**6 # Converter for Ma to years
time = age*years # Years for initial calc
Ndi = Ndrat-(Smrat*(exp(lambdaSm*time)-1)) # Calculate initial 143Nd/144Nd
CHUR143 = 0.512630 #CHUR 143Nd/144Nd from Bouvier et al., 2008
CHUR147 = 0.1960 # CHUR 147Sm/44Nd from Bouvier et al., 2008
CHUR143i = CHUR143-CHUR147*(exp(lambdaSm*time)-1) # Calculate CHUR for age
eNd = ((Ndrat/CHUR143)-1)*10**4 # Calculate EpsNd
eNdi = ((Ndi/CHUR143i)-1)*10**4 # Calculate EpsNdi
    # If age is a single integer, wrap it in a pandas Series so it is iterable.
if isinstance(age,int):
age = pd.Series(age)
if 0 not in age.values:
return (Ndi,eNdi)
elif 0 in age.values:
return (eNd)
else:
print('Mix of zero and non-zero ages')
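# Quick sanity check (hedged example): a present-day sample with the CHUR 143Nd/144Nd
# value of 0.512630 should give an epsilon Nd of exactly 0.
# >>> epsNd(0.512630)
# 0.0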
def Srinit(Srrat,Rbrat,age):
"""
Calculate initial 87Sr/86Sr.
    Parameters:
Srrat: Measured 87Sr/86Sr ratio
Rbrat: Measured 87Rb/86Sr ratio
age: Age of sample in Ma
Returns:
Sri: initial 87Sr/86Sr ratio
"""
lambdaRb = 1.39e-11 # Rb-87 decay constant
years = 10**6 # Converter for Ma to years
time = age*years # Years for initial calc
Sri = Srrat-(Rbrat*(exp(lambdaRb*time)-1)) # Calculate initial 87Sr/86Sr
return (Sri)
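# Hedged example: with no rubidium (87Rb/86Sr = 0) no radiogenic 87Sr accumulates, so
# the initial ratio equals the measured one, e.g. Srinit(0.7045, 0.0, 100) -> 0.7045.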
def RbSr_rat(Rb,Sr,Srrat):
"""
Calculate 87Rb/86Sr from Rb and Sr concentrations
    Parameters:
Rb: Rb concentration (ppm)
Sr: Sr concentration (ppm)
Srrat: 87Sr/86Sr ratio
Returns:
rbsr8786: Calculated 87Rb/86Sr ratio
"""
# Fundamental Rb and Sr isotopic parameters - from CIAAW
Rb85_mass = 84.91178974 # Da
Rb87_mass = 86.90918053 # Da
Rb85_abund = 0.7217
Rb87_abund = 0.2783
Sr84_mass = 83.913419
Sr86_mass = 85.90926073
Sr87_mass = 86.90887750
Sr88_mass = 87.90561226
# Sr abundances vary - only used for calculation of ratios that
# don't vary
Sr84_abund = 0.0056
Sr86_abund = 0.0986
Sr87_abund = 0.0700 # Not used
Sr88_abund = 0.8258
Sr_8886 = Sr88_abund/Sr86_abund # 88Sr/86Sr ratio - doesn't vary
Sr_8486 = Sr84_abund/Sr86_abund # 84Sr/86Sr ratio - doesn't vary
# Calculate true abundances
Sr86_abund_calc = Srrat/(Srrat+Sr_8886+Sr_8486+1)
Sr84_abund_calc = Sr86_abund_calc*Sr_8486
Sr88_abund_calc = Sr86_abund_calc*Sr_8886
Sr87_abund_calc = Sr86_abund_calc*Srrat
# Total Mass for Rb and Sr
Rb_mass = Rb85_mass*Rb85_abund + Rb87_mass*Rb87_abund
Sr_mass = (
Sr84_mass*Sr84_abund_calc + Sr86_mass*Sr86_abund_calc
+ Sr87_mass*Sr87_abund_calc + Sr88_mass*Sr88_abund_calc
)
# 87Rb and 86Sr
Rb87 = Rb*Rb87_abund/Rb_mass # Get mol of Rb87
Sr86 = Sr*Sr86_abund_calc/Sr_mass # Get mol of Sr86
rbsr8786 = Rb87/Sr86
check = (Rb/Sr)*(2.69295 + 0.28304*Srrat)
print('Check: ',check)
return(rbsr8786)
def SmNd_rat(Sm,Nd,Ndrat):
"""
Calculate 147Sm/144Nd using reported Sm, Nd, and 143Nd/144Nd
Parameters:
Sm: Sm concentration (ppm)
Nd: Nd concentration (ppm)
Ndrat: 143Nd/144Nd ratio
Returns:
        smnd147144: Calculated 147Sm/144Nd ratio
"""
# Sm and Nd isotopic parameters - from CIAAW
Sm144_mass = 143.91201
Sm147_mass = 146.91490
Sm148_mass = 147.91483
Sm149_mass = 148.917191
Sm150_mass = 149.917282
Sm152_mass = 151.919739
Sm154_mass = 153.92222
Sm144_abund = 0.0308
Sm147_abund = 0.1500
Sm148_abund = 0.1125
Sm149_abund = 0.1382
Sm150_abund = 0.0737
Sm152_abund = 0.2674
Sm154_abund = 0.2274
Nd142_mass = 141.90773
Nd143_mass = 142.90982
Nd144_mass = 143.91009
Nd145_mass = 144.91258
Nd146_mass = 145.91312
Nd148_mass = 147.91690
Nd150_mass = 149.920902
# Nd abundances vary
# Non-varying Nd ratios (from Faure et al., 2005)
Nd_146144 = 0.7219 # Hamilton et al., 1983
Nd_142144 = 1.141827
Nd_145144 = 0.348417
Nd_148144 = 0.241578
Nd_150144 = 0.236418
# Calculate Nd abundances
Nd144_abund = Ndrat/(Ndrat + 1 + Nd_146144 + Nd_142144 + Nd_145144
+ Nd_148144 + Nd_150144)
Nd142_abund = Nd_142144*Nd144_abund
Nd143_abund = Ndrat*Nd144_abund
Nd145_abund = Nd_145144*Nd144_abund
Nd146_abund = Nd_146144*Nd144_abund
Nd148_abund = Nd_148144*Nd144_abund
Nd150_abund = Nd_150144*Nd144_abund
# Total mass for Sm and Nd
Sm_mass = (
Sm144_mass*Sm144_abund + Sm147_mass*Sm147_abund + Sm148_mass*
Sm148_abund + Sm149_mass*Sm149_abund + Sm150_mass*Sm150_abund +
Sm152_mass*Sm152_abund + Sm154_mass*Sm154_abund
)
Nd_mass = (
Nd142_mass*Nd142_abund + Nd143_mass*Nd143_abund +
Nd144_mass*Nd144_abund + Nd145_mass*Nd145_abund +
Nd146_mass*Nd146_abund + Nd148_mass*Nd148_abund +
Nd150_mass*Nd150_abund
)
# 147Sm and 143Nd
Sm147 = Sm*Sm147_abund/Sm_mass
Nd144 = Nd*Nd144_abund/Nd_mass
smnd147144 = Sm147/Nd144
check1 = (Sm/Nd) * (0.53149+0.14252*Ndrat)
check2 = (Sm/Nd) * 0.602
print('Check1: ',check1)
print('Check2: ',check2)
return(smnd147144) | [
"pandas.Series",
"numpy.exp"
] | [((1198, 1212), 'pandas.Series', 'pd.Series', (['age'], {}), '(age)\n', (1207, 1212), True, 'import pandas as pd\n'), ((723, 743), 'numpy.exp', 'exp', (['(lambdaSm * time)'], {}), '(lambdaSm * time)\n', (726, 743), False, 'from numpy import exp\n'), ((947, 967), 'numpy.exp', 'exp', (['(lambdaSm * time)'], {}), '(lambdaSm * time)\n', (950, 967), False, 'from numpy import exp\n'), ((1831, 1851), 'numpy.exp', 'exp', (['(lambdaRb * time)'], {}), '(lambdaRb * time)\n', (1834, 1851), False, 'from numpy import exp\n')] |
# Copyright (c) 2017, 2018 <NAME>
# See the file LICENSE for details.
from x2py.builtin_events import *
from x2py.event import EventProxy
from x2py.flow import Flow
from x2py.flows.event_based_flow import EventBasedFlow
from x2py.util.trace import Trace
class ThreadlessFlow(EventBasedFlow):
def __init__(self, name=None):
super(ThreadlessFlow, self).__init__(name)
self.running = False
def start(self):
with self._lock:
if self.running:
return
self._setup()
self.cases.setup_with(self)
Flow.thread_local.current = self
Flow.thread_local.event_proxy = EventProxy()
Flow.thread_local.handler_chain = []
self.running = True
self.queue.enqueue(FlowStart())
def stop(self):
with self._lock:
if not self.running:
return
self.queue.close(FlowStop())
self.running = False
Flow.thread_local.handler_chain = None
Flow.thread_local.event_proxy = None
Flow.thread_local.current = None
self.cases.teardown_with(self)
self._teardown()
def dispatch(self):
event = self.queue.dequeue()
if event is None:
return
self.dispatch(event)
def try_dispatch(self):
event = self.queue.try_dequeue()
if event is not None:
self.dispatch(event)
return event
def try_dispatch_all(self):
n = 0
while True:
event = self.queue.try_dequeue()
if event is None:
break
self.dispatch(event)
n += 1
return n
| [
"x2py.event.EventProxy"
] | [((665, 677), 'x2py.event.EventProxy', 'EventProxy', ([], {}), '()\n', (675, 677), False, 'from x2py.event import EventProxy\n')] |
from sklearn_explain.tests.skl_datasets_reg import skl_datasets_test as skltest
skltest.test_reg_dataset_and_model("freidman1" , "RandomForestRegressor_5")
| [
"sklearn_explain.tests.skl_datasets_reg.skl_datasets_test.test_reg_dataset_and_model"
] | [((82, 156), 'sklearn_explain.tests.skl_datasets_reg.skl_datasets_test.test_reg_dataset_and_model', 'skltest.test_reg_dataset_and_model', (['"""freidman1"""', '"""RandomForestRegressor_5"""'], {}), "('freidman1', 'RandomForestRegressor_5')\n", (116, 156), True, 'from sklearn_explain.tests.skl_datasets_reg import skl_datasets_test as skltest\n')] |
"""
**********************************************************************************
This is the main application file!!
It sets up all the flask application configuration settings, and intializes the
flask application used throughout the code.
**********************************************************************************
"""
# third party libraries
from flask import Flask
from flask_cors import CORS
#from flask_caching import Cache
# my libraries
from .common.CustomEncoder import CustomEncoder
# blueprints
from . import routes
#----------------------------------------------------------
# Setup the flask app
# Add custom config options
#----------------------------------------------------------
def initApp(flaskApp: Flask):
# setup the custom response json encoder
flaskApp.json_encoder = CustomEncoder
flaskApp.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
# setup caching
# flaskApp.config['CACHE_TYPE'] = "SimpleCache"
# flaskApp.config['CACHE_DEFAULT_TIMEOUT'] = 300
# setup the CORS policy
CORS(flaskApp)
#----------------------------------------------------------
# Register all the blueprints
#----------------------------------------------------------
def registerBlueprints(flaskApp: Flask):
flaskApp.register_blueprint(routes.account.bp_account, url_prefix='/account')
flaskApp.register_blueprint(routes.completions.bp_completions, url_prefix='/completions')
flaskApp.register_blueprint(routes.events.bp_events, url_prefix='/events')
flaskApp.register_blueprint(routes.recurrences.bp_recurrences, url_prefix='/recurrences')
flaskApp.register_blueprint(routes.cancelations.bp_cancelations, url_prefix='/cancelations')
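    # Illustrative note (endpoint paths assumed): with url_prefix='/events', a route
    # declared as '/' inside routes.events is served at '/events/'.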
# call all the init functions
app = Flask(__name__)
initApp(app)
registerBlueprints(app)
# cache = Cache(app) | [
"flask_cors.CORS",
"flask.Flask"
] | [((1761, 1776), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1766, 1776), False, 'from flask import Flask\n'), ((1066, 1080), 'flask_cors.CORS', 'CORS', (['flaskApp'], {}), '(flaskApp)\n', (1070, 1080), False, 'from flask_cors import CORS\n')] |
"""This module contains useful class definitions for different types of geometries.
This module contains the following classes:
Airplane: This is a class used to contain airplanes.
    Wing: This is a class used to contain the wings of a current_airplane.
    WingCrossSection: This class is used to contain the cross sections of the wings
    of a current_airplane.
    Airfoil: This class is used to contain the airfoil of a cross section of a wing
    of a current_airplane.
This module contains the following exceptions:
None
This module contains the following functions:
None
"""
import importlib.resources
import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate as sp_interp
from . import functions
from . import meshing
class Airplane:
"""This is a class used to contain airplanes.
Citation:
Adapted from: geometry.Airplane in AeroSandbox
Author: <NAME>
Date of Retrieval: 04/23/2020
This class contains the following public methods:
set_reference_dimensions_from_wing: This method sets the reference dimensions
of the current_airplane from measurements obtained from the main wing.
This class contains the following class attributes:
None
Subclassing:
This class is not meant to be subclassed.
"""
def __init__(
self,
name="Untitled",
x_ref=0.0,
y_ref=0.0,
z_ref=0.0,
wings=None,
s_ref=None,
c_ref=None,
b_ref=None,
):
"""This is the initialization method.
:param name: str, optional
A sensible name for your current_airplane. The default is "Untitled".
:param x_ref: float, optional
This is the x coordinate of the moment reference point. It should be the
x coordinate of the center of
gravity. The default is 0.0.
:param y_ref: float, optional
This is the y coordinate of the moment reference point. It should be the
y coordinate of the center of
gravity. The default is 0.0.
:param z_ref: float, optional
This is the z coordinate of the moment reference point. It should be the
z coordinate of the center of
gravity. The default is 0.0.
:param wings: list of Wing objects, optional
This is a list of the current_airplane's wings defined as Wing objects.
The default is None, which this
method converts to an empty list.
:param s_ref: float, optional if more than one wing is in the wings list.
This is the reference wetted area. If not set, it populates from first
wing object.
:param c_ref: float, optional if more than one wing is in the wings list.
This is the reference chord length. If not set, it populates from first
wing object.
:param b_ref: float, optional if more than one wing is in the wings list.
            This is the reference span. If not set, it populates from first
wing object.
"""
# Initialize the name and the moment reference point.
self.name = name
self.x_ref = x_ref
self.y_ref = y_ref
self.z_ref = z_ref
self.xyz_ref = np.array(
[float(self.x_ref), float(self.y_ref), float(self.z_ref)]
)
# If wings was passed as None, set wings to an empty list.
if wings is None:
wings = []
self.wings = wings
        # If the wing list is not empty, set the wing reference dimensions to be
# the main wing's reference
# dimensions.
if len(self.wings) > 0:
self.set_reference_dimensions_from_main_wing()
# If any of the passed reference dimensions are not None, set that reference
# dimension to be what was passed.
if s_ref is not None:
self.s_ref = float(s_ref)
if c_ref is not None:
self.c_ref = float(c_ref)
if b_ref is not None:
self.b_ref = float(b_ref)
# Calculate the number of panels in the entire current_airplane.
self.num_panels = 0
for wing_position, wing in enumerate(self.wings):
self.num_panels += wing.num_panels
# Initialize empty class attributes to hold the force, moment,
# force coefficients, and moment coefficients this
        # airplane experiences after a solver has been run.
self.total_near_field_force_wind_axes = None
self.total_near_field_force_coefficients_wind_axes = None
self.total_near_field_moment_wind_axes = None
self.total_near_field_moment_coefficients_wind_axes = None
def set_reference_dimensions_from_main_wing(self):
"""This method sets the reference dimensions of the current_airplane from
measurements obtained from the main
wing.
This method assumes the main wing to be the first wing in the wings list
passed by the user.
:return: None
"""
# Define the main wing to be the first wing in the wings list.
main_wing = self.wings[0]
# Set the objects reference dimension attributes to be the reference
# dimension attributes of the main wing.
# These attributes are calculated via methods in the Wing class.
self.s_ref = float(main_wing.wetted_area)
self.b_ref = float(main_wing.span)
self.c_ref = float(main_wing.wetted_area / main_wing.span)
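# Hedged usage sketch (values are illustrative, not taken from this package's examples):
# an Airplane constructed with no wings must be given its reference dimensions
# explicitly, since there is no main wing to derive them from.
# >>> plane = Airplane(name="Demo", s_ref=1.0, c_ref=0.2, b_ref=5.0)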
class Wing:
"""This is a class used to contain the wings of an current_airplane.
If the wing is symmetric across the XZ plane, just define the right half and
supply "symmetric=True" in the constructor. If the wing is not symmetric across
the XZ plane, just define the wing.
Citation:
Adapted from: geometry.Wing in AeroSandbox
Author: <NAME>
Date of Retrieval: 04/24/2020
This class contains the following public methods:
calculate_wetted_area: This method calculates the wetted area of the wing
based on the areas of its panels.
calculate_span: This method calculates the span of the wing.
This class contains the following class attributes:
None
Subclassing:
This class is not meant to be subclassed.
"""
def __init__(
self,
name="Untitled Wing",
x_le=0.0,
y_le=0.0,
z_le=0.0,
wing_cross_sections=None,
symmetric=False,
num_chordwise_panels=8,
chordwise_spacing="cosine",
):
"""This is the initialization method.
:param name: str, optional
This is a sensible name for the wing. The default is "Untitled Wing".
:param x_le: float, optional
This is the x coordinate of the leading edge of the wing, relative to the
current_airplane's reference
point. The default is 0.0.
:param y_le: float, optional
This is the y coordinate of the leading edge of the wing, relative to the
current_airplane's reference
point. The default is 0.0.
:param z_le: float, optional
This is the z coordinate of the leading edge of the wing, relative to the
current_airplane's reference
point. The default is 0.0.
:param wing_cross_sections: list of WingCrossSection objects, optional
This is a list of WingCrossSection objects, that represent the wing's
cross sections. The default is None.
:param symmetric: bool, optional
Set this to true if the wing is across the xz plane. Set it to false if
not. The default is false.
:param num_chordwise_panels: int, optional
This is the number of chordwise panels to be used on this wing. The
default is 8.
:param chordwise_spacing: str, optional
This is the type of spacing between the wing's chordwise panels. It can
be set to "cosine" or "uniform".
Cosine is highly recommended. The default is cosine.
"""
# Initialize the name and the position of the wing's leading edge.
self.name = name
self.x_le = x_le
self.y_le = y_le
self.z_le = z_le
self.leading_edge = np.array(
[float(self.x_le), float(self.y_le), float(self.z_le)]
)
# If wing_cross_sections is set to None, set it to an empty list.
if wing_cross_sections is None:
wing_cross_sections = []
# Initialize the other attributes.
self.wing_cross_sections = wing_cross_sections
self.symmetric = symmetric
self.num_chordwise_panels = num_chordwise_panels
self.chordwise_spacing = chordwise_spacing
# Catch invalid values of chordwise_spacing.
if self.chordwise_spacing not in ["cosine", "uniform"]:
raise Exception("Invalid value of chordwise_spacing!")
# Find the number of spanwise panels on the wing by adding each cross
# section's number of spanwise panels. Exclude the last cross section's
# number of spanwise panels as this is irrelevant. If the wing is symmetric,
        # multiply the summation by two.
self.num_spanwise_panels = 0
for cross_section in self.wing_cross_sections[:-1]:
self.num_spanwise_panels += cross_section.num_spanwise_panels
if self.symmetric:
self.num_spanwise_panels *= 2
if self.symmetric and self.wing_cross_sections[0].y_le != 0:
raise Exception("Symmetric wing with root wing cross section off XZ plane!")
# Calculate the number of panels on this wing.
self.num_panels = self.num_spanwise_panels * self.num_chordwise_panels
        # Initialize the panels attribute. Then mesh the wing, which will
# populate this attribute.
self.panels = None
meshing.mesh_wing(self)
# Initialize and calculate the wing's wetted area. If the wing is
# symmetrical, this includes the area of the
# mirrored half.
self.wetted_area = None
self.calculate_wetted_area()
        # Initialize and calculate the wing's span. If the wing is symmetrical,
        # this includes the length of the mirrored half.
self.span = None
self.calculate_span()
# Initialize an empty array to hold this wing's wake ring vortices and its
# wake ring vortex vertices.
self.wake_ring_vortex_vertices = np.empty((0, self.num_spanwise_panels + 1, 3))
self.wake_ring_vortices = np.zeros((0, self.num_spanwise_panels), dtype=object)
def calculate_wetted_area(self):
"""This method calculates the wetted area of the wing based on the areas of
its panels.
This method also updates the class's wetted area attribute. If the wing is
symmetrical, it includes the area of
the mirrored half.
:return: None
"""
wetted_area = 0
# Iterate through the chordwise and spanwise indices of the panels.
for chordwise_location in range(self.num_chordwise_panels):
for spanwise_location in range(self.num_spanwise_panels):
# Add each panel's area to the total wetted area of the wing.
wetted_area += self.panels[chordwise_location, spanwise_location].area
self.wetted_area = wetted_area
def calculate_span(self):
"""This method calculates the calculate_span of the wing.
This method also updates the class's span attribute. If the wing is
symmetrical, it includes the length of the mirrored half.
:return: None
"""
# Calculate the span (y-distance between the root and the tip) of the entire
# wing.
span = (
self.wing_cross_sections[-1].leading_edge[1]
- self.wing_cross_sections[0].leading_edge[1]
)
# If the wing is symmetric, multiply the span by two.
if self.symmetric:
span *= 2
self.span = span
class WingCrossSection:
"""This class is used to contain the cross sections of the wings of an
current_airplane.
Citation:
Adapted from: geometry.WingXSec in AeroSandbox
Author: <NAME>
Date of Retrieval: 04/26/2020
This class contains the following public methods:
xyz_te: This method calculates the coordinates of the trailing edge of the
cross section.
This class contains the following class attributes:
None
Subclassing:
This class is not meant to be subclassed.
"""
def __init__(
self,
x_le=0.0,
y_le=0.0,
z_le=0.0,
chord=1.0,
twist=0.0,
airfoil=None,
control_surface_type="symmetric",
control_surface_hinge_point=0.75,
control_surface_deflection=0.0,
num_spanwise_panels=8,
spanwise_spacing="cosine",
):
"""This is the initialization method.
:param x_le: float, optional
This is the x coordinate of the leading edge of the cross section
relative to the wing's datum. The default
value is 0.0.
:param y_le: float, optional
This is the y coordinate of the leading edge of the cross section
relative to the wing's datum. The default
value is 0.0.
:param z_le: float, optional
This is the z coordinate of the leading edge of the cross section
relative to the wing's datum. The default
value is 0.0.
:param chord: float, optional
This is the chord of the wing at this cross section. The default value is
1.0.
:param twist: float, optional
This is the twist of the cross section about the leading edge in degrees.
The default value is 0.0.
:param airfoil: Airfoil, optional
This is the airfoil to be used at this cross section. The default value
is None.
:param control_surface_type: str, optional
This is type of control surfaces for this cross section. It can be
"symmetric" or "asymmetric". An example
of symmetric control surfaces are flaps. An example of asymmetric control
surfaces are ailerons. The default
value is "symmetric".
:param control_surface_hinge_point: float, optional
            This is the location of the control surface hinge from the leading
            edge as a fraction of chord. The default value is 0.75.
:param control_surface_deflection: float, optional
            This is the control surface deflection in degrees. Deflection downwards
            is positive. The default value is 0.0 degrees.
:param num_spanwise_panels: int, optional
This is the number of spanwise panels to be used between this cross
section and the next one. The default
value is 8.
:param spanwise_spacing: str, optional
            This is the type of spacing between this cross section's spanwise
            panels. It can be 'cosine' or 'uniform'. Cosine is highly recommended.
            The default value is 'cosine'.
"""
# Initialize all the class attributes.
self.x_le = float(x_le)
self.y_le = float(y_le)
self.z_le = float(z_le)
self.chord = float(chord)
self.twist = float(twist)
self.airfoil = airfoil
self.control_surface_type = control_surface_type
self.control_surface_hinge_point = float(control_surface_hinge_point)
self.control_surface_deflection = float(control_surface_deflection)
self.num_spanwise_panels = num_spanwise_panels
self.spanwise_spacing = spanwise_spacing
self.leading_edge = np.array([x_le, y_le, z_le])
# Catch bad values of the chord length.
if self.chord <= 0:
raise Exception("Invalid value of chord")
# Catch invalid values of control_surface_type.
if self.control_surface_type not in ["symmetric", "asymmetric"]:
raise Exception("Invalid value of control_surface_type")
# Catch invalid values of spanwise_spacing.
if self.spanwise_spacing not in ["cosine", "uniform"]:
raise Exception("Invalid value of spanwise_spacing!")
def trailing_edge(self):
"""This method calculates the coordinates of the trailing edge of the cross
section.
:return trailing_edge: array
This is a 1D array that contains the coordinates of the cross section's
trailing edge.
"""
# Find the rotation matrix given the cross section's twist.
rotation_matrix = functions.angle_axis_rotation_matrix(
self.twist * np.pi / 180, np.array([0, 1, 0])
)
# Use the rotation matrix and the leading edge coordinates to calculate the
# trailing edge coordinates.
trailing_edge = self.leading_edge + rotation_matrix @ np.array(
[self.chord, 0.0, 0.0]
)
# Return the 1D array that contains the trailing edge's coordinates.
return trailing_edge
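# A hedged usage sketch, not part of the original module: it builds a single
# WingCrossSection at the wing's datum and queries its trailing edge. The chord
# and twist values are illustrative assumptions.
def _example_wing_cross_section():
    # A cross section with a 1.5 meter chord, twisted 15 degrees about its leading edge.
    example_cross_section = WingCrossSection(chord=1.5, twist=15.0)
    # trailing_edge returns a 1D array with the rotated trailing edge's coordinates.
    return example_cross_section.trailing_edge()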
class Airfoil:
"""This class is used to contain the airfoil of a cross section of a wing of an
current_airplane.
Citation:
Adapted from: geometry.Airfoil in AeroSandbox
Author: <NAME>
Date of Retrieval: 04/27/2020
This class contains the following public methods:
populate_coordinates: This method populates a variable with the coordinates
of the airfoil.
populate_mcl_coordinates: This method creates a list of the airfoil's mean
camber line coordinates. It also creates two lists of the vectors needed to
go from the mcl coordinates to the upper and lower surfaces. It also creates
        a list of the thicknesses at the x coordinates along the mean camber line.
leading_edge_index: This method returns the index of the point along the
leading edge.
lower_coordinates: This method returns a matrix of x and y coordinates that
describe the lower surface of the airfoil.
upper_coordinates: This method returns a matrix of x and y coordinates that
describe the upper surface of the airfoil.
get_downsampled_mcl: This method returns the mean camber line in a
downsampled form.
get_camber_at_chord_fraction: This method returns the camber of the airfoil
at a given fraction of the chord.
repanel_current_airfoil: This method returns a repaneled version of the
airfoil with cosine-spaced coordinates on the upper and lower surfaces.
add_control_surface: This method returns a version of the airfoil with a
control surface added at a given point.
This class contains the following class attributes:
None
Subclassing:
This class is not meant to be subclassed.
"""
def __init__(
self,
name="Untitled Airfoil",
coordinates=None,
repanel=True,
n_points_per_side=400,
):
"""This is the initialization method.
:param name: str, optional
This is the name of the airfoil. It should correspond to the name in the
airfoils directory unless you are
passing in your own coordinates. The default is "Untitled Airfoil".
:param coordinates: array, optional
This is a N x 2 array of the airfoil's coordinates, where N is the
number of coordinates. Treat this
            as immutable; don't edit it directly after initialization. If you wish to
load coordinates from the airfoil
directory, leave this as None. The default is None. Make sure that any
airfoil coordinates used range in x
from 0 to 1.
:param repanel: bool, optional
This is the variable that determines whether or not you would like to
repanel the airfoil coordinates. This
applies to coordinates passed in by the user or to the directory
coordinates. It is highly recommended to
set this to True. The default is True.
:param n_points_per_side: int, optional
            This is the number of points to use when repaneling the airfoil. It is
            ignored if repanel is False. The default is 400.
"""
# Initialize the airfoil name.
self.name = name
# Check if the user supplied coordinates.
if coordinates is not None:
self.coordinates = coordinates
else:
# If not, populate the coordinates from the directory.
self.populate_coordinates() # populates self.coordinates
# Check that the coordinates have been set.
assert hasattr(self, "coordinates")
# Initialize other attributes.
self.repanel = repanel
self.mcl_coordinates = None
self.upper_minus_mcl = None
self.thickness = None
# If repanel is True, repanel the airfoil.
if self.repanel:
self.repanel_current_airfoil(n_points_per_side=n_points_per_side)
# Populate the mean camber line attributes.
self.populate_mcl_coordinates()
def populate_coordinates(self):
"""This method populates a variable with the coordinates of the airfoil.
The airfoil coordinates will either be generated, if the airfoil is a NACA
        4-series airfoil, or loaded from the airfoil database (a folder named
        "airfoils" in this directory that contains a library of dat files for
airfoil coordinates). NACA 4-series airfoil generation is an adaptation of:
https://en.wikipedia.org/wiki/NACA_airfoil#Equation_for_a_cambered_4
-digit_NACA_airfoil.
:return: None
"""
# Sanitize the name input.
name = self.name.lower().strip()
# Check if the airfoil name is a NACA 4-series airfoil. If so, generate it.
if "naca" in name:
naca_number = name.split("naca")[1]
if naca_number.isdigit():
if len(naca_number) == 4:
# Parse the characteristics from the name.
max_camber = int(naca_number[0]) * 0.01
camber_loc = int(naca_number[1]) * 0.1
thickness = int(naca_number[2:]) * 0.01
# Set the number of points per side.
n_points_per_side = 100
# Make uncambered coordinates and generate cosine-spaced points.
x_t = functions.cosspace(0, 1, n_points_per_side)
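                    # This is the NACA 4-series thickness distribution, with t equal to
                    # the thickness variable above:
                    # y_t(x) = 5 * t * (0.2969 * sqrt(x) - 0.1260 * x - 0.3516 * x^2 + 0.2843 * x^3 - 0.1015 * x^4)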
y_t = (
5
* thickness
* (
+0.2969 * np.power(x_t, 0.5)
- 0.1260 * x_t
- 0.3516 * np.power(x_t, 2)
+ 0.2843 * np.power(x_t, 3)
- 0.1015 * np.power(x_t, 4)
)
)
# Prevent divide by zero errors for airfoils like the NACA 0012.
if camber_loc == 0:
camber_loc = 0.5
# Get the camber.
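                    # The camber line is piecewise parabolic: y_c = m / p^2 * (2 * p * x - x^2)
                    # for x <= p, and y_c = m / (1 - p)^2 * ((1 - 2 * p) + 2 * p * x - x^2)
                    # for x > p, where m is max_camber and p is camber_loc.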
y_c_piece1 = (
max_camber
/ camber_loc ** 2
* (
2 * camber_loc * x_t[x_t <= camber_loc]
- x_t[x_t <= camber_loc] ** 2
)
)
y_c_piece2 = (
max_camber
/ (1 - camber_loc) ** 2
* (
(1 - 2 * camber_loc)
+ 2 * camber_loc * x_t[x_t > camber_loc]
- x_t[x_t > camber_loc] ** 2
)
)
y_c = np.hstack((y_c_piece1, y_c_piece2))
# Get camber slope.
first_piece_slope = (
2
* max_camber
/ camber_loc ** 2
* (camber_loc - x_t[x_t <= camber_loc])
)
second_piece_slope = (
2
* max_camber
/ (1 - camber_loc) ** 2
* (camber_loc - x_t[x_t > camber_loc])
)
slope = np.hstack((first_piece_slope, second_piece_slope))
theta = np.arctan(slope)
# Combine everything.
x_u = x_t - y_t * np.sin(theta)
x_l = x_t + y_t * np.sin(theta)
y_u = y_c + y_t * np.cos(theta)
y_l = y_c - y_t * np.cos(theta)
# Flip upper surface so it's back to front.
x_u, y_u = np.flipud(x_u), np.flipud(y_u)
# Trim 1 point from lower surface so there's no overlap.
x_l, y_l = x_l[1:], y_l[1:]
# Combine and format the coordinates.
x = np.hstack((x_u, x_l))
y = np.hstack((y_u, y_l))
coordinates = np.column_stack((x, y))
# Populate the coordinates attribute and return.
self.coordinates = coordinates
return
# Try to read from the airfoil directory.
try:
# Import the airfoils package as "airfoils".
airfoils = importlib.import_module(
name=".airfoils",
package="pterasoftware",
)
# Read the text from the airfoil file.
raw_text = importlib.resources.read_text(airfoils, name + ".dat")
# Trim the text at the return characters.
trimmed_text = raw_text[raw_text.find("\n") :]
# Input the coordinates into a 1D array.
coordinates_1d = np.fromstring(trimmed_text, sep="\n")
# Check to make sure the number of elements in the array is even.
assert len(coordinates_1d) % 2 == 0, (
"File was found in airfoil database, "
"but it could not be read correctly."
)
# Reshape the 1D coordinates array into a N x 2 array, where N is the
# number of rows.
coordinates = np.reshape(coordinates_1d, (-1, 2))
# Populate the coordinates attribute and return.
self.coordinates = coordinates
return
# If the airfoil was not a NACA 4-series and was not found in the
# database, throw an error.
except FileNotFoundError:
raise Exception("Airfoil not in database!")
def populate_mcl_coordinates(self):
"""This method creates a list of the airfoil's mean camber line coordinates.
It also creates two lists of the
vectors needed to go from the mcl coordinates to the upper and lower
        surfaces. It also creates a list of the
thicknesses at the x coordinates along the mean camber line.
All vectors are listed from the leading edge to the trailing edge of the
airfoil.
:return: None
"""
# Get the upper and lower coordinates. Flip the upper coordinates so that it
# is ordered from the leading edge to
# the trailing edge.
upper = np.flipud(self.upper_coordinates())
lower = self.lower_coordinates()
# Calculate the approximate mean camber line and populate the class attribute.
mcl_coordinates = (upper + lower) / 2
self.mcl_coordinates = mcl_coordinates
# Find the vectors from each mean camber line coordinate to its upper
# coordinate.
self.upper_minus_mcl = upper - self.mcl_coordinates
# Create a list of values that are the thickness of the airfoil at each mean
# camber line.
thickness = np.sqrt(np.sum(np.power(self.upper_minus_mcl, 2), axis=1)) * 2
# Populate the class attribute with the thicknesses at their associated x
# coordinates.
self.thickness = np.column_stack((self.mcl_coordinates[:, 0], thickness))
def leading_edge_index(self):
"""Returns the index of the leading edge point.
:return leading_edge_index: int
This is the index of the leading edge point.
"""
# Find the index of the coordinate pair with the minimum value of the x
# coordinate. This is the leading edge
# index.
leading_edge_index = np.argmin(self.coordinates[:, 0])
# Return the leading edge index.
return leading_edge_index
def lower_coordinates(self):
"""This method returns a matrix of x and y coordinates that describe the
lower surface of the airfoil.
The order of the returned matrix is from leading edge to trailing edge. This
matrix includes the leading edge
point so be careful about duplicates if using this method in conjunction with
self.upper_coordinates.
:return lower_coordinates: array
This is a N x 2 array of x and y coordinates that describe the lower
surface of the airfoil, where N
is the number of points.
"""
# Find the lower coordinates.
lower_coordinates = self.coordinates[self.leading_edge_index() :, :]
# Return the lower coordinates.
return lower_coordinates
def upper_coordinates(self):
"""This method returns a matrix of x and y coordinates that describe the
upper surface of the airfoil.
The order of the returned matrix is from trailing edge to leading edge. This
matrix includes the leading edge
point so be careful about duplicates if using this method in conjunction with
self.lower_coordinates.
:return upper_coordinates: array
This is a N x 2 array of x and y coordinates that describe the upper
surface of the airfoil, where N
is the number of points.
"""
# Find the upper coordinates.
upper_coordinates = self.coordinates[: self.leading_edge_index() + 1, :]
# Return the upper coordinates.
return upper_coordinates
def get_downsampled_mcl(self, mcl_fractions):
"""This method returns the mean camber line in a downsampled form.
:param mcl_fractions: 1D array
This is a 1D array that lists the points along the mean camber line (
normalized from 0 to 1) at which
to return the mean camber line coordinates.
:return mcl_downsampled: 2D array
This is a 2D array that contains the coordinates of the downsampled
mean camber line.
"""
mcl = self.mcl_coordinates
# Find the distances between points along the mean camber line, assuming
# linear interpolation.
mcl_distances_between_points = np.sqrt(
np.power(mcl[:-1, 0] - mcl[1:, 0], 2)
+ np.power(mcl[:-1, 1] - mcl[1:, 1], 2)
)
# Create a horizontal 1D array that contains the distance along the mean
# camber line of each point.
mcl_distances_cumulative = np.hstack(
(0, np.cumsum(mcl_distances_between_points))
)
# Normalize the 1D array so that it ranges from 0 to 1.
mcl_distances_cumulative_normalized = (
mcl_distances_cumulative / mcl_distances_cumulative[-1]
)
# Linearly interpolate to find the x coordinates of the mean camber line at
# the given mean camber line
# fractions.
mcl_downsampled_x = np.interp(
x=mcl_fractions, xp=mcl_distances_cumulative_normalized, fp=mcl[:, 0]
)
# Linearly interpolate to find the y coordinates of the mean camber line at
# the given mean camber line
# fractions.
mcl_downsampled_y = np.interp(
x=mcl_fractions, xp=mcl_distances_cumulative_normalized, fp=mcl[:, 1]
)
# Combine the x and y coordinates of the downsampled mean camber line.
mcl_downsampled = np.column_stack((mcl_downsampled_x, mcl_downsampled_y))
# Return the coordinates of the downsampled mean camber line.
return mcl_downsampled
def get_camber_at_chord_fraction(self, chord_fraction):
"""This method returns the camber of the airfoil at a given fraction of the
chord.
:param chord_fraction: float
This is a float of the fraction along the chord (normalized from 0 to 1)
at which to return the camber.
:return camber: float
This is the camber of the airfoil at the requested fraction along the chord.
"""
# Create a function that interpolates between the x and y coordinates of the
# mean camber line.
camber_function = sp_interp.interp1d(
x=self.mcl_coordinates[:, 0],
y=self.mcl_coordinates[:, 1],
copy=False,
fill_value="extrapolate",
)
# Find the value of the camber (the y coordinate) of the airfoil at the
# requested chord fraction.
camber = camber_function(chord_fraction)
# Return the camber of the airfoil at the requested chord fraction.
return camber
def repanel_current_airfoil(self, n_points_per_side=100):
"""This method returns a repaneled version of the airfoil with cosine-spaced
coordinates on the upper and lower
surfaces.
The number of points defining the final airfoil will be (n_points_per_side *
2 - 1), since the leading edge
point is shared by both the upper and lower surfaces.
:param n_points_per_side: int, optional
This is the number of points on the upper and lower surfaces. The default
value is 100.
:return: None
"""
# Get the upper and lower surface coordinates. These both contain the leading
# edge point.
upper_original_coordinates = self.upper_coordinates()
lower_original_coordinates = self.lower_coordinates()
# Generate a cosine-spaced list of points from 0 to 1.
cosine_spaced_x_values = functions.cosspace(0, 1, n_points_per_side)
# Create interpolated functions for the x and y values of the upper and lower
# surfaces as a function of the
# chord fractions
upper_func = sp_interp.PchipInterpolator(
x=np.flip(upper_original_coordinates[:, 0]),
y=np.flip(upper_original_coordinates[:, 1]),
)
lower_func = sp_interp.PchipInterpolator(
x=lower_original_coordinates[:, 0], y=lower_original_coordinates[:, 1]
)
# Find the x and y coordinates of the upper and lower surfaces at each of the
# cosine-spaced x values.
x_coordinates = np.hstack(
(np.flip(cosine_spaced_x_values), cosine_spaced_x_values[1:])
)
y_coordinates = np.hstack(
(
upper_func(np.flip(cosine_spaced_x_values)),
lower_func(cosine_spaced_x_values[1:]),
)
)
# Stack the coordinates together and return them.
coordinates = np.column_stack((x_coordinates, y_coordinates))
self.coordinates = coordinates
def add_control_surface(self, deflection=0.0, hinge_point=0.75):
"""This method returns a version of the airfoil with a control surface added
at a given point.
:param deflection: float, optional
This is the deflection angle in degrees. Deflection downwards is
positive. The default value is 0.0.
:param hinge_point: float, optional
This is the location of the hinge as a fraction of chord length. The
default value is 0.75.
:return flapped_airfoil: Airfoil
This is the new airfoil with the control surface added.
"""
        # Ensure that the airfoil's deflection is not too extreme, since extreme
        # deflections increase the risk of self-intersection.
if deflection > 90 or deflection < -90:
raise Exception("Invalid value for deflection!")
# Make the rotation matrix for the given angle.
sin_theta = np.sin(np.radians(-deflection))
cos_theta = np.cos(np.radians(-deflection))
rotation_matrix = np.array([[cos_theta, -sin_theta], [sin_theta, cos_theta]])
# Find y coordinate at the hinge point x coordinate and make it a vector.
hinge_point = np.array(
(hinge_point, self.get_camber_at_chord_fraction(hinge_point))
)
# Split the airfoil into the sections before and after the hinge.
split_index = np.where(self.mcl_coordinates[:, 0] > hinge_point[0])[0][0]
mcl_coordinates_before = self.mcl_coordinates[:split_index, :]
mcl_coordinates_after = self.mcl_coordinates[split_index:, :]
upper_minus_mcl_before = self.upper_minus_mcl[:split_index, :]
upper_minus_mcl_after = self.upper_minus_mcl[split_index:, :]
# Rotate the mean camber line coordinates and upper minus mean camber line
# vectors.
new_mcl_coordinates_after = (
np.transpose(
rotation_matrix @ np.transpose(mcl_coordinates_after - hinge_point)
)
+ hinge_point
)
new_upper_minus_mcl_after = np.transpose(
rotation_matrix @ np.transpose(upper_minus_mcl_after)
)
# Assemble the new, flapped airfoil.
new_mcl_coordinates = np.vstack(
(mcl_coordinates_before, new_mcl_coordinates_after)
)
new_upper_minus_mcl = np.vstack(
(upper_minus_mcl_before, new_upper_minus_mcl_after)
)
upper_coordinates = np.flipud(new_mcl_coordinates + new_upper_minus_mcl)
lower_coordinates = new_mcl_coordinates - new_upper_minus_mcl
coordinates = np.vstack((upper_coordinates, lower_coordinates[1:, :]))
# Initialize the new, flapped airfoil and return it.
flapped_airfoil = Airfoil(
name=self.name + " flapped", coordinates=coordinates, repanel=False
)
return flapped_airfoil
def draw(self):
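        """This method plots the airfoil's coordinates with matplotlib.
        :return: None
        """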
x = self.coordinates[:, 0]
y = self.coordinates[:, 1]
plt.plot(x, y)
plt.xlim(0, 1)
plt.ylim(-0.5, 0.5)
plt.gca().set_aspect("equal", adjustable="box")
plt.show()
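# A hedged usage sketch, not part of the original module: it generates a NACA
# 4-series airfoil, queries its camber, and draws it. The airfoil name, point
# count, and chord fraction are illustrative assumptions.
def _example_airfoil():
    # Generate and repanel the coordinates of a NACA 2412 airfoil.
    example_airfoil = Airfoil(name="naca2412", repanel=True, n_points_per_side=200)
    # Query the camber of the mean camber line at mid-chord.
    mid_chord_camber = example_airfoil.get_camber_at_chord_fraction(0.5)
    # Plot the airfoil's outline.
    example_airfoil.draw()
    return mid_chord_camber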
| [
"numpy.radians",
"numpy.hstack",
"numpy.column_stack",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.sin",
"numpy.flip",
"numpy.reshape",
"numpy.where",
"matplotlib.pyplot.plot",
"numpy.empty",
"numpy.vstack",
"numpy.argmin",
"matplotlib.pyplot.ylim",
"numpy.fromstring",
"numpy.arctan",
"numpy.flipud",
"matplotlib.pyplot.gca",
"scipy.interpolate.PchipInterpolator",
"numpy.cos",
"numpy.interp",
"matplotlib.pyplot.xlim",
"numpy.transpose",
"matplotlib.pyplot.show",
"numpy.power",
"numpy.zeros",
"numpy.cumsum"
] | [((10680, 10726), 'numpy.empty', 'np.empty', (['(0, self.num_spanwise_panels + 1, 3)'], {}), '((0, self.num_spanwise_panels + 1, 3))\n', (10688, 10726), True, 'import numpy as np\n'), ((10761, 10814), 'numpy.zeros', 'np.zeros', (['(0, self.num_spanwise_panels)'], {'dtype': 'object'}), '((0, self.num_spanwise_panels), dtype=object)\n', (10769, 10814), True, 'import numpy as np\n'), ((16009, 16037), 'numpy.array', 'np.array', (['[x_le, y_le, z_le]'], {}), '([x_le, y_le, z_le])\n', (16017, 16037), True, 'import numpy as np\n'), ((28626, 28682), 'numpy.column_stack', 'np.column_stack', (['(self.mcl_coordinates[:, 0], thickness)'], {}), '((self.mcl_coordinates[:, 0], thickness))\n', (28641, 28682), True, 'import numpy as np\n'), ((29058, 29091), 'numpy.argmin', 'np.argmin', (['self.coordinates[:, 0]'], {}), '(self.coordinates[:, 0])\n', (29067, 29091), True, 'import numpy as np\n'), ((32197, 32282), 'numpy.interp', 'np.interp', ([], {'x': 'mcl_fractions', 'xp': 'mcl_distances_cumulative_normalized', 'fp': 'mcl[:, 0]'}), '(x=mcl_fractions, xp=mcl_distances_cumulative_normalized, fp=mcl[:, 0]\n )\n', (32206, 32282), True, 'import numpy as np\n'), ((32471, 32556), 'numpy.interp', 'np.interp', ([], {'x': 'mcl_fractions', 'xp': 'mcl_distances_cumulative_normalized', 'fp': 'mcl[:, 1]'}), '(x=mcl_fractions, xp=mcl_distances_cumulative_normalized, fp=mcl[:, 1]\n )\n', (32480, 32556), True, 'import numpy as np\n'), ((32680, 32735), 'numpy.column_stack', 'np.column_stack', (['(mcl_downsampled_x, mcl_downsampled_y)'], {}), '((mcl_downsampled_x, mcl_downsampled_y))\n', (32695, 32735), True, 'import numpy as np\n'), ((33435, 33556), 'scipy.interpolate.interp1d', 'sp_interp.interp1d', ([], {'x': 'self.mcl_coordinates[:, 0]', 'y': 'self.mcl_coordinates[:, 1]', 'copy': '(False)', 'fill_value': '"""extrapolate"""'}), "(x=self.mcl_coordinates[:, 0], y=self.mcl_coordinates[:, \n 1], copy=False, fill_value='extrapolate')\n", (33453, 33556), True, 'import scipy.interpolate as sp_interp\n'), ((35189, 35293), 'scipy.interpolate.PchipInterpolator', 'sp_interp.PchipInterpolator', ([], {'x': 'lower_original_coordinates[:, 0]', 'y': 'lower_original_coordinates[:, 1]'}), '(x=lower_original_coordinates[:, 0], y=\n lower_original_coordinates[:, 1])\n', (35216, 35293), True, 'import scipy.interpolate as sp_interp\n'), ((35822, 35869), 'numpy.column_stack', 'np.column_stack', (['(x_coordinates, y_coordinates)'], {}), '((x_coordinates, y_coordinates))\n', (35837, 35869), True, 'import numpy as np\n'), ((36958, 37017), 'numpy.array', 'np.array', (['[[cos_theta, -sin_theta], [sin_theta, cos_theta]]'], {}), '([[cos_theta, -sin_theta], [sin_theta, cos_theta]])\n', (36966, 37017), True, 'import numpy as np\n'), ((38159, 38221), 'numpy.vstack', 'np.vstack', (['(mcl_coordinates_before, new_mcl_coordinates_after)'], {}), '((mcl_coordinates_before, new_mcl_coordinates_after))\n', (38168, 38221), True, 'import numpy as np\n'), ((38274, 38336), 'numpy.vstack', 'np.vstack', (['(upper_minus_mcl_before, new_upper_minus_mcl_after)'], {}), '((upper_minus_mcl_before, new_upper_minus_mcl_after))\n', (38283, 38336), True, 'import numpy as np\n'), ((38387, 38439), 'numpy.flipud', 'np.flipud', (['(new_mcl_coordinates + new_upper_minus_mcl)'], {}), '(new_mcl_coordinates + new_upper_minus_mcl)\n', (38396, 38439), True, 'import numpy as np\n'), ((38532, 38588), 'numpy.vstack', 'np.vstack', (['(upper_coordinates, lower_coordinates[1:, :])'], {}), '((upper_coordinates, lower_coordinates[1:, :]))\n', (38541, 38588), True, 'import numpy as np\n'), 
((38906, 38920), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (38914, 38920), True, 'import matplotlib.pyplot as plt\n'), ((38929, 38943), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (38937, 38943), True, 'import matplotlib.pyplot as plt\n'), ((38952, 38971), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.5)', '(0.5)'], {}), '(-0.5, 0.5)\n', (38960, 38971), True, 'import matplotlib.pyplot as plt\n'), ((39036, 39046), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (39044, 39046), True, 'import matplotlib.pyplot as plt\n'), ((17013, 17032), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (17021, 17032), True, 'import numpy as np\n'), ((26423, 26460), 'numpy.fromstring', 'np.fromstring', (['trimmed_text'], {'sep': '"""\n"""'}), "(trimmed_text, sep='\\n')\n", (26436, 26460), True, 'import numpy as np\n'), ((26853, 26888), 'numpy.reshape', 'np.reshape', (['coordinates_1d', '(-1, 2)'], {}), '(coordinates_1d, (-1, 2))\n', (26863, 26888), True, 'import numpy as np\n'), ((36855, 36878), 'numpy.radians', 'np.radians', (['(-deflection)'], {}), '(-deflection)\n', (36865, 36878), True, 'import numpy as np\n'), ((36907, 36930), 'numpy.radians', 'np.radians', (['(-deflection)'], {}), '(-deflection)\n', (36917, 36930), True, 'import numpy as np\n'), ((17227, 17259), 'numpy.array', 'np.array', (['[self.chord, 0.0, 0.0]'], {}), '([self.chord, 0.0, 0.0])\n', (17235, 17259), True, 'import numpy as np\n'), ((31503, 31540), 'numpy.power', 'np.power', (['(mcl[:-1, 0] - mcl[1:, 0])', '(2)'], {}), '(mcl[:-1, 0] - mcl[1:, 0], 2)\n', (31511, 31540), True, 'import numpy as np\n'), ((31555, 31592), 'numpy.power', 'np.power', (['(mcl[:-1, 1] - mcl[1:, 1])', '(2)'], {}), '(mcl[:-1, 1] - mcl[1:, 1], 2)\n', (31563, 31592), True, 'import numpy as np\n'), ((31784, 31823), 'numpy.cumsum', 'np.cumsum', (['mcl_distances_between_points'], {}), '(mcl_distances_between_points)\n', (31793, 31823), True, 'import numpy as np\n'), ((35058, 35099), 'numpy.flip', 'np.flip', (['upper_original_coordinates[:, 0]'], {}), '(upper_original_coordinates[:, 0])\n', (35065, 35099), True, 'import numpy as np\n'), ((35115, 35156), 'numpy.flip', 'np.flip', (['upper_original_coordinates[:, 1]'], {}), '(upper_original_coordinates[:, 1])\n', (35122, 35156), True, 'import numpy as np\n'), ((35480, 35511), 'numpy.flip', 'np.flip', (['cosine_spaced_x_values'], {}), '(cosine_spaced_x_values)\n', (35487, 35511), True, 'import numpy as np\n'), ((37314, 37367), 'numpy.where', 'np.where', (['(self.mcl_coordinates[:, 0] > hinge_point[0])'], {}), '(self.mcl_coordinates[:, 0] > hinge_point[0])\n', (37322, 37367), True, 'import numpy as np\n'), ((38037, 38072), 'numpy.transpose', 'np.transpose', (['upper_minus_mcl_after'], {}), '(upper_minus_mcl_after)\n', (38049, 38072), True, 'import numpy as np\n'), ((38980, 38989), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (38987, 38989), True, 'import matplotlib.pyplot as plt\n'), ((24303, 24338), 'numpy.hstack', 'np.hstack', (['(y_c_piece1, y_c_piece2)'], {}), '((y_c_piece1, y_c_piece2))\n', (24312, 24338), True, 'import numpy as np\n'), ((24880, 24930), 'numpy.hstack', 'np.hstack', (['(first_piece_slope, second_piece_slope)'], {}), '((first_piece_slope, second_piece_slope))\n', (24889, 24930), True, 'import numpy as np\n'), ((24959, 24975), 'numpy.arctan', 'np.arctan', (['slope'], {}), '(slope)\n', (24968, 24975), True, 'import numpy as np\n'), ((25563, 25584), 'numpy.hstack', 'np.hstack', (['(x_u, x_l)'], {}), '((x_u, x_l))\n', (25572, 
25584), True, 'import numpy as np\n'), ((25609, 25630), 'numpy.hstack', 'np.hstack', (['(y_u, y_l)'], {}), '((y_u, y_l))\n', (25618, 25630), True, 'import numpy as np\n'), ((25665, 25688), 'numpy.column_stack', 'np.column_stack', (['(x, y)'], {}), '((x, y))\n', (25680, 25688), True, 'import numpy as np\n'), ((28447, 28480), 'numpy.power', 'np.power', (['self.upper_minus_mcl', '(2)'], {}), '(self.upper_minus_mcl, 2)\n', (28455, 28480), True, 'import numpy as np\n'), ((35627, 35658), 'numpy.flip', 'np.flip', (['cosine_spaced_x_values'], {}), '(cosine_spaced_x_values)\n', (35634, 35658), True, 'import numpy as np\n'), ((37857, 37906), 'numpy.transpose', 'np.transpose', (['(mcl_coordinates_after - hinge_point)'], {}), '(mcl_coordinates_after - hinge_point)\n', (37869, 37906), True, 'import numpy as np\n'), ((25323, 25337), 'numpy.flipud', 'np.flipud', (['x_u'], {}), '(x_u)\n', (25332, 25337), True, 'import numpy as np\n'), ((25339, 25353), 'numpy.flipud', 'np.flipud', (['y_u'], {}), '(y_u)\n', (25348, 25353), True, 'import numpy as np\n'), ((25057, 25070), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (25063, 25070), True, 'import numpy as np\n'), ((25109, 25122), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (25115, 25122), True, 'import numpy as np\n'), ((25161, 25174), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (25167, 25174), True, 'import numpy as np\n'), ((25213, 25226), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (25219, 25226), True, 'import numpy as np\n'), ((23323, 23339), 'numpy.power', 'np.power', (['x_t', '(4)'], {}), '(x_t, 4)\n', (23331, 23339), True, 'import numpy as np\n'), ((23267, 23283), 'numpy.power', 'np.power', (['x_t', '(3)'], {}), '(x_t, 3)\n', (23275, 23283), True, 'import numpy as np\n'), ((23211, 23227), 'numpy.power', 'np.power', (['x_t', '(2)'], {}), '(x_t, 2)\n', (23219, 23227), True, 'import numpy as np\n'), ((23110, 23128), 'numpy.power', 'np.power', (['x_t', '(0.5)'], {}), '(x_t, 0.5)\n', (23118, 23128), True, 'import numpy as np\n')] |
import requests
import pandas as pd
import random
import os
global line
line = 0
def BeginChartReading(url):
print(url + "-------> Pached")
strhtml = requests.get(url)
strhtml.encoding='utf-8'
print (strhtml.headers['content-type'])
global line
dataframe = pd.DataFrame({'url':url,'url_text':strhtml.text},index=[line])
line=line+1
dataframe.to_csv("PatchWebdatabin.csv",encoding='utf-8',index=False,sep=',')
def BaiduPacher(name):
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'
}
defSearchurl = "http://www.baidu.com/s?ie=UTF-8&wd="
    SearchInitUrl = defSearchurl + name # compose the search URL
    strhtml = requests.get(SearchInitUrl,headers = headers) # request the page content
strhtml.encoding='utf-8'
print (strhtml.text)
target_dir = "patched_html/" + name #存储目标目录
mkdir(target_dir)
saveHtml("Result-" + str(random.randint(100000000,900000000)),strhtml.text,target_dir)
return
def BingPacher(name):
return
def Search360Pacher(name):
return
def OtherWebSearchPacher(name,configs,websearchurls):
return
def PatchConfigs(config):
return
def UpdateRecognise(UpdateConfig,urls,texts,User):
return
def StrogeData(Mode,Charset,Text,dirs):
return
def saveHtml(file_name,file_content,data_dir):
b1 = bytes( file_content,encoding = "utf8")
with open(data_dir +file_name.replace('/','_')+'.html','wb') as f:
f.write(b1)
def mkdir(path):
nowpath = os.getcwd()
folder = os.path.exists(nowpath +"\\"+ path)
    if not folder: # check whether the folder exists; if not, create it
        os.makedirs(nowpath +"\\"+ path) # makedirs also creates any missing intermediate directories in the path
print ("--- new folder... ---")
print ("--- OK ---")
else:
print ("--- There is this folder! ---")
| [
"os.path.exists",
"os.makedirs",
"requests.get",
"os.getcwd",
"pandas.DataFrame",
"random.randint"
] | [((170, 187), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (182, 187), False, 'import requests\n'), ((297, 363), 'pandas.DataFrame', 'pd.DataFrame', (["{'url': url, 'url_text': strhtml.text}"], {'index': '[line]'}), "({'url': url, 'url_text': strhtml.text}, index=[line])\n", (309, 363), True, 'import pandas as pd\n'), ((797, 841), 'requests.get', 'requests.get', (['SearchInitUrl'], {'headers': 'headers'}), '(SearchInitUrl, headers=headers)\n', (809, 841), False, 'import requests\n'), ((1669, 1680), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1678, 1680), False, 'import os\n'), ((1695, 1732), 'os.path.exists', 'os.path.exists', (["(nowpath + '\\\\' + path)"], {}), "(nowpath + '\\\\' + path)\n", (1709, 1732), False, 'import os\n'), ((1816, 1850), 'os.makedirs', 'os.makedirs', (["(nowpath + '\\\\' + path)"], {}), "(nowpath + '\\\\' + path)\n", (1827, 1850), False, 'import os\n'), ((1013, 1049), 'random.randint', 'random.randint', (['(100000000)', '(900000000)'], {}), '(100000000, 900000000)\n', (1027, 1049), False, 'import random\n')] |
import numpy as np
from sklearn.model_selection import KFold
from graspy.embed import MultipleASE
from .base import SupervisedLearningPipeline
class MASEPipeline(SupervisedLearningPipeline):
def __init__(self,
learning_method,
memory=None,
verbose=False,
plot_method=None,
kfold = KFold(n_splits=5)
):
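        # Prepend a MultipleASE embedding step so that the steps supplied in
        # learning_method are fit on the multiple adjacency spectral embedding.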
super(MASEPipeline, self).__init__(steps=[('MASE', MultipleASE())]+learning_method, memory=memory, verbose=verbose, plot_method=plot_method, kfold=kfold)
if plot_method is not None:
self.plot = plot_method
if kfold is not None:
self.kfold = kfold
def cross_val_score(self, dataset, labels):
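        """This method scores the pipeline with k-fold cross validation over the
        dataset and returns the average score along with the per-fold scores."""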
test_results = []
for train_index, test_index in self.kfold.split(dataset):
dataset= np.array(dataset)
dataset_train, dataset_test = dataset[train_index], dataset[test_index]
self.fit(dataset_train, labels)
test_results.append(self.score(dataset_test, labels))
avg_score = sum(test_results)/len(test_results)
return avg_score, test_results
| [
"numpy.array",
"sklearn.model_selection.KFold",
"graspy.embed.MultipleASE"
] | [((308, 325), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(5)'}), '(n_splits=5)\n', (313, 325), False, 'from sklearn.model_selection import KFold\n'), ((729, 746), 'numpy.array', 'np.array', (['dataset'], {}), '(dataset)\n', (737, 746), True, 'import numpy as np\n'), ((386, 399), 'graspy.embed.MultipleASE', 'MultipleASE', ([], {}), '()\n', (397, 399), False, 'from graspy.embed import MultipleASE\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: hanshengjiang
"""
from optimal_pricing_policy_exp_update import *
import random
import time
import sys
if __name__ == "__main__":
if len(sys.argv) < 2:
raise ValueError("Please provide experiments configuration!")
config = sys.argv[1]
# default
if len(sys.argv) < 7:
epsilon = 0.01 # price discretization accuracy
L = 0 #lower bound of price
H = 10 #upper bound of price
T = np.inf
theta = 0.8
gamma = 0.95
# otherwise take arguments from command line
else:
#sys_argv[0] is the name of the .py file
epsilon = float(sys.argv[2])
        L = float(sys.argv[3]) # lower bound of price
H = float(sys.argv[4])
T = float(sys.argv[5])
if T != np.inf:
T = int(T)
        theta = float(sys.argv[6])
        gamma = 0.95 # discount factor, not read from the command line
price_list = np.arange(L-epsilon,H+2*epsilon,epsilon)
if config == 'mmnl_1':
coefficients = (1,1,1,1)
weights = (1,0,0)
demand_name = 'mmnl'
elif config == 'mmnl_2_1':
coefficients = (-5,0,1,5)
weights = (1,0,0)
demand_name = 'mmnl'
elif config == 'mmnl_2_2':
coefficients = (2,10,0.5,1)
weights = (1,0,0)
demand_name = 'mmnl'
elif config == 'mmnl_3':
coefficients = (2,2,0.2,0.2,-1,0.2,0,0)
weights = (0.5,0.5,0)
demand_name = 'mmnl'
elif config == 'mmnl_3_1':
coefficients = (2,2,0.2,0.2)
weights = (1,0,0)
demand_name = 'mmnl'
elif config == 'mmnl_3_2':
coefficients = (-1,0.2,0,0)
weights = (1,0,0)
demand_name = 'mmnl'
elif config == 'mmnl_myopic':
coefficients = (2,2,0.2,0.2)
weights = (1,0,0)
demand_name = 'mmnl'
# name prefix for saved plots
coeffname = 'inf_hor_fixed_price_range'+str(L).replace(".","dot")+'_'+str(H).replace(".","dot")+'_'+str(T)+'_'\
+str(coefficients).replace(" ", "_").replace(".","dot")\
+str(weights).replace(" ","_").replace(".","dot")+'_'+str(theta).replace(".", "dot")+'_'+str(gamma).replace(".", "dot")+demand_name
#---------------------------------#
# optimal pricing policy
start_time = time.time()
#optimal pricing policy
V,mu \
= inf_hor_pricing_pricerange(L,H,theta,epsilon,np.inf,gamma,coefficients,weights,demand_name)
print("running time {}".format(time.time()- start_time))
#---------------------------------#
#---------------------------------#
# plot results
#---------------------------------#
#---------------reference price transition function-------------------------#
fig, ax = plt.subplots()
plt.tight_layout()
#ax.plot(price_list[np.arange(len(mu)).astype(int)], price_list[mu.astype(int)],\
# color = 'black');
ax.scatter(price_list[np.arange(len(mu)).astype(int)], price_list[mu.astype(int)],\
color = 'black',marker = 'o', facecolors = 'none', s = 5); # s controls markersize
ax.set_xlabel(r"$r_t$",size = 16);
ax.set_ylabel(r"$r_{t+1}$",size = 16);
ax.set_ylim(L-1,H+1)
fig.savefig('./../pricing_output/%s_referenceprice'%coeffname, dpi= 300)
#---------------value function------------------------#
fig1, ax1 = plt.subplots()
plt.tight_layout()
ax1.plot(price_list[np.arange(len(mu)).astype(int)], \
V, color = 'black');
ax1.set_xlabel(r"$r$",size = 16);
ax1.set_ylabel(r"$V^*(r)$",size = 16);
#ax2.set_ylim(,)
fig1.savefig('./../pricing_output/%s_value_function'%coeffname, dpi= 300)
#---------------pricing policy function-------------------------#
fig2, ax2 = plt.subplots()
plt.tight_layout()
#ax2.plot(price_list[np.arange(len(mu)).astype(int)],
# (price_list[mu.astype(int)] - theta * price_list[np.arange(len(mu)).astype(int)])/(1-theta),\
# color = 'black');
ax2.scatter(price_list[np.arange(len(mu)).astype(int)],
(price_list[mu.astype(int)] - theta * price_list[np.arange(len(mu)).astype(int)])/(1-theta),\
color = 'black', marker = 'o', s= 5);
ax2.set_xlabel(r"$r$",size = 16);
ax2.set_ylabel(r"$p^*(r)$",size = 16);
ax2.set_ylim(L-1,H+1)
fig2.savefig('./../pricing_output/%s_price'%coeffname, dpi= 300)
#---------------price path-------------------------#
# plot price path
    start_ = 0 # starting time for plots
end_ = 20
i_li = random.sample(range(len(V)),10)
i_li.append(int((H/epsilon)/2)+1)
for i in i_li:
c = []
id = i
# need to add the first reference price
c.append(id)
#apply the policy a few times
for j in range(end_):
c.append(int(mu[id]))
id = int(mu[id])
fig3, ax3 = plt.subplots()
plt.tight_layout()
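        # Recover the posted prices from consecutive reference prices by inverting
        # the exponential smoothing update r_{t+1} = theta * r_t + (1 - theta) * p_t.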
price_path = (price_list[c[1:]] - theta * price_list[c[:-1]])/(1-theta)
ax3.plot(np.arange(start_,end_),price_path[start_:],color = 'black',linestyle = '--')
ax3.scatter(np.arange(start_,end_),price_path[start_:],marker = 'o',facecolors = 'none',edgecolors = 'black')
ax3.set_ylim(L,H+1)
ax3.set_xticks(np.arange(start_,end_+1,5))
ax3.set_xlabel('Time', size = 16)
#ax3.set_ylabel('optimal price path',size = 16)
ax3.set_ylabel('Price',size = 16)
coeffname_r0 = coeffname + 'r0=' + str(round(price_list[i],2)).replace(".","dot")
fig3.savefig('./../pricing_output/%s_price_path'%coeffname_r0, dpi= 300)
#---------------long term price path plot-------------------------#
id = i
c = []
start_ = 80
end_ = 100
# need to add the first reference price
c.append(id)
for j in range(end_):
c.append(int(mu[id]))
id = int(mu[id])
fig4, ax4 = plt.subplots()
plt.tight_layout()
price_path = (price_list[c[1:]] - theta * price_list[c[:-1]])/(1-theta)
ax4.plot(np.arange(start_,end_),price_path[start_:],color = 'black',linestyle = '--')
ax4.scatter(np.arange(start_,end_),price_path[start_:],marker = 'o',facecolors = 'none',edgecolors = 'black')
ax4.set_ylim(L,H+1)
ax4.set_xticks(np.arange(start_,end_+1,5))
ax4.set_xlabel('Time', size = 16)
#ax4.set_ylabel('optimal price path',size = 16)
ax4.set_ylabel('Price',size = 16)
coeffname_r0 = coeffname + 'r0=' + str(round(price_list[i],2)).replace(".","dot")+'100time'
fig4.savefig('./../pricing_output/%s_longterm_price_path'%coeffname_r0, dpi= 300)
#--------------revenue plot for heterogeneous market --------------------#
c = []
id = int((H/epsilon)/2)+1
# need to add the first reference price
c.append(id)
#### plot revenues
for j in range(20):
c.append(int(mu[id]))
id = int(mu[id])
fig3, ax3 = plt.subplots(figsize = (10,5))
price_path = (price_list[c[1:]] - theta * price_list[c[:-1]])/(1-theta)
r1 = []
r2 = []
for j in range(20):
r = price_list[c[j]]
p = price_path[j]
print("================{}================".format(j))
print(r,p,coefficients,weights[0])
temp1 = R(r,p,coefficients[:4],(weights[0],0,0))
print("temp1", temp1)
r1.append(temp1)
temp2 = R(r,p,coefficients[4:],(weights[1],0,0))
print("temp2", temp2)
r2.append(temp2)
r = np.array(r1) + np.array(r2)
ax3.plot(np.arange(20),r,label = r'Total',marker = 'o',mfc = 'none',linestyle = '-',color = 'tab:gray')
ax3.plot(np.arange(20),r1,label = r'Consumer $A$',marker = 'd',mfc = 'none',linestyle = '--',color= 'tab:blue')
ax3.plot(np.arange(20),r2,label = r'Consumer $B$', marker = 's',mfc = 'none', linestyle = ':',color = 'tab:red')
# ax3.scatter(np.arange(20),price_path[start_:],marker = 'o',facecolors = 'none',edgecolors = 'black')
#ax3.set_ylim(L,H+1)
ax3.set_xticks(np.arange(0,21,5))
ax3.set_xlabel('Time', size = 16)
ax3.set_ylabel('Expected Revenue',size = 16)
plt.legend(bbox_to_anchor=(1.02, 1))
plt.tight_layout()
coeffname_r0 = coeffname + 'r0=' + str(round(price_list[i],2)).replace(".","dot")
fig3.savefig('./../pricing_output/%s_revenue'%coeffname_r0, dpi= 300)
| [
"time.time"
] | [((2143, 2154), 'time.time', 'time.time', ([], {}), '()\n', (2152, 2154), False, 'import time\n'), ((2313, 2324), 'time.time', 'time.time', ([], {}), '()\n', (2322, 2324), False, 'import time\n')] |
# -*- coding: utf8 -*-
'''
========================================================================
CygnusCloud
========================================================================
File: clusterEndpointDB.py
Version: 5.0
Description: full cluster endpoint database connector
Copyright 2012-13 <NAME>, <NAME>,
<NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from editionState_t import EDITION_STATE_T
from clusterEndpoint.databases.minimalClusterEndpointDBConnector import MinimalClusterEndpointDBConnector
class ClusterEndpointDBConnector(MinimalClusterEndpointDBConnector):
"""
Initializes the connector's state
Args:
sqlUser: the MySQL user to use
sqlPassword: the MySQL password to use
databaseName: a MySQL database name.
"""
def __init__(self, sqlUser, sqlPassword, databaseName):
MinimalClusterEndpointDBConnector.__init__(self, sqlUser, sqlPassword, databaseName)
self.__vmServerSegmentsData = []
self.__vmServerSegments = 0
self.__vmServerResourceUsageData = []
self.__vmServerResourceUsageSegments = 0
self.__imageDistributionSegmentsData = []
self.__imageDistributionSegments = 0
self.__activeVMSegmentsData = dict()
self.__activeVMSegments = dict()
def processVMServerResourceUsageSegment(self, segmentNumber, segmentCount, data):
"""
Processes a virtual machine server resource usage segment
Args:
segmentNumber: the segment's position in the sequence
segmentCount: the number of segments in the sequence
data: the segment's data
Returns:
Nothing
"""
if (data != []) :
self.__vmServerResourceUsageData += data
self.__vmServerResourceUsageSegments += 1
if (self.__vmServerResourceUsageSegments == segmentCount) :
receivedData = ClusterEndpointDBConnector.__getVMServersDictionary(self.__vmServerResourceUsageData)
registeredIDs = self.__getKnownVMServerIDs("VirtualMachineServerStatus")
if (registeredIDs != None) :
for ID in registeredIDs :
if not (receivedData.has_key(ID)) :
self.__deleteVMServerStatusData(ID)
inserts = []
for ID in receivedData.keys() :
if (registeredIDs != None and ID in registeredIDs) :
self.__updateVMServerStatusData(receivedData[ID])
else :
inserts.append(receivedData[ID])
if (inserts != []) :
self.__insertVMServerStatusData(inserts)
self.__vmServerResourceUsageData = []
self.__vmServerResourceUsageSegments = 0
def __insertVMServerStatusData(self, tupleList):
update = "INSERT INTO VirtualMachineServerStatus VALUES {0};"\
.format(ClusterEndpointDBConnector.__convertTuplesToSQLStr(tupleList))
self._executeUpdate(update)
def __updateVMServerStatusData(self, receivedData):
update = "UPDATE VirtualMachineServerStatus SET hosts = {0}, ramInUse = {1}, ramSize = {2},\
freeStorageSpace = {3}, availableStorageSpace = {4}, freeTemporarySpace = {5}, availableTemporarySpace = {6},\
activeVCPUs = {7}, physicalCPUs = {8} WHERE serverName = '{9}';".format(receivedData[1], receivedData[2], receivedData[3],
receivedData[4], receivedData[5], receivedData[6], receivedData[7],
receivedData[8], receivedData[9], receivedData[0])
self._executeUpdate(update)
def __deleteVMServerStatusData(self, serverName):
update = "DELETE FROM VirtualMachineServerStatus WHERE serverName = '{0}';".format(serverName)
self._executeUpdate(update)
def processVMServerSegment(self, segmentNumber, segmentCount, data):
"""
Processes a virtual machine server configuration segment
Args:
segmentNumber: the segment's position in the sequence
segmentCount: the number of segments in the sequence
data: the segment's data
Returns:
Nothing
"""
if (data != []) :
self.__vmServerSegmentsData += data
self.__vmServerSegments += 1
if (self.__vmServerSegments == segmentCount) :
receivedData = ClusterEndpointDBConnector.__getVMServersDictionary(self.__vmServerSegmentsData)
registeredIDs = self.__getKnownVMServerIDs()
if (registeredIDs != None) :
for ID in registeredIDs :
if not (receivedData.has_key(ID)) :
self.__deleteVMServer(ID)
inserts = []
for ID in receivedData.keys() :
if (registeredIDs != None and ID in registeredIDs) :
self.__updateVMServerData(receivedData[ID])
else :
inserts.append(receivedData[ID])
if (inserts != []) :
self.__insertVMServers(inserts)
self.__vmServerSegmentsData = []
self.__vmServerSegments = 0
def processImageCopiesDistributionSegment(self, segmentNumber, segmentCount, data):
"""
Processes an image copies distribution segment
Args:
segmentNumber: the segment's position in the sequence
segmentCount: the number of segments in the sequence
data: the segment's data
Returns:
Nothing
"""
if (data != []) :
self.__imageDistributionSegmentsData.append(data)
self.__imageDistributionSegments += 1
if (self.__imageDistributionSegments == segmentCount) :
            # Delete the table and rebuild it
command = "DELETE FROM VirtualMachineDistribution;"
self._executeUpdate(command)
if (self.__imageDistributionSegmentsData != []) :
command = "INSERT INTO VirtualMachineDistribution VALUES " + ClusterEndpointDBConnector.__convertSegmentsToSQLTuples(self.__imageDistributionSegmentsData)
self.__imageDistributionSegmentsData = []
self.__imageDistributionSegments = 0
self._executeUpdate(command)
def processActiveVMVNCDataSegment(self, segmentNumber, segmentCount, vmServerIP, data):
"""
Processes an active virtual machines VNC data segment
Args:
segmentNumber: the segment's position in the sequence
segmentCount: the number of segments in the sequence
data: the segment's data
Returns:
Nothing
"""
if (not self.__activeVMSegmentsData.has_key(vmServerIP)) :
self.__activeVMSegmentsData[vmServerIP] = []
self.__activeVMSegments[vmServerIP] = 0
if (data != []) :
self.__activeVMSegmentsData[vmServerIP] += data
self.__activeVMSegments[vmServerIP] += 1
if (self.__activeVMSegments[vmServerIP] == segmentCount) :
receivedData = ClusterEndpointDBConnector.__getActiveVMsDictionary(self.__activeVMSegmentsData[vmServerIP])
registeredIDs = self.__getActiveVMIDs()
for ID in registeredIDs :
if not (receivedData.has_key(ID)) :
self.__deleteActiveVM(ID)
self.updateEditedImageState(ID, EDITION_STATE_T.TRANSFER_TO_REPOSITORY, EDITION_STATE_T.VM_ON)
inserts = []
for ID in receivedData.keys() :
if (not (ID in registeredIDs)) :
inserts.append(receivedData[ID])
if (inserts != []) :
self.__insertActiveVMData(self.__getVMServerName(vmServerIP), inserts)
self.__activeVMSegmentsData[vmServerIP] = []
self.__activeVMSegments[vmServerIP] = 0
@staticmethod
def __getVMServersDictionary(segmentList):
result = {}
for segment in segmentList :
result[segment[0]] = segment
return result
@staticmethod
def __getActiveVMsDictionary(segmentList):
result = {}
for segment in segmentList :
result[segment[0]] = segment
return result
def __getKnownVMServerIDs(self, table="VirtualMachineServer"):
query = "SELECT serverName FROM {0};".format(table)
result = set()
output = self._executeQuery(query, False)
if (output == None) :
return None
for t in output :
result.add(t)
return result
def __insertVMServers(self, tupleList):
update = "INSERT INTO VirtualMachineServer VALUES {0};"\
.format(ClusterEndpointDBConnector.__convertTuplesToSQLStr(tupleList))
self._executeUpdate(update)
def __updateVMServerData(self, data):
update = "UPDATE VirtualMachineServer SET serverStatus='{1}', serverIP='{2}', serverPort={3},\
isVanillaServer = {4} WHERE serverName='{0}'".format(data[0], data[1], data[2], data[3], data[4])
self._executeUpdate(update)
def __deleteVMServer(self, serverID):
update = "DELETE FROM ActiveVirtualMachines WHERE serverName = '{0}';".format(serverID)
self._executeUpdate(update)
update = "DELETE FROM VirtualMachineServer WHERE serverName = '{0}'".format(serverID)
self._executeUpdate(update)
def __getActiveVMIDs(self):
query = "SELECT domainUID FROM ActiveVirtualMachines;"
results = self._executeQuery(query)
if (results == None) :
return set()
output = set()
for row in results :
output.add(row)
return output
def __insertActiveVMData(self, vmServerIP, data):
update = "REPLACE ActiveVirtualMachines VALUES {0};"\
.format(ClusterEndpointDBConnector.__convertTuplesToSQLStr(data, [vmServerIP]))
self._executeUpdate(update)
def __deleteActiveVM(self, domainUID):
update = "DELETE FROM ActiveVirtualMachines WHERE domainUID = '{0}';"\
.format(domainUID)
self._executeUpdate(update)
def __getVMServerName(self, serverIP):
query = "SELECT serverName FROM VirtualMachineServer WHERE serverIP = '" + serverIP + "';"
result = self._executeQuery(query, True)
return str(result)
@staticmethod
def __convertTuplesToSQLStr(tupleList, dataToAdd = []):
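        # For example, [('server1', 1), ('server2', 2)] with dataToAdd=[] becomes the
        # string "('server1', 1), ('server2', 2)", ready to follow a VALUES clause.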
isFirstSegment = True
command = ""
for segmentTuple in tupleList :
if (isFirstSegment) :
isFirstSegment = False
else :
command += ", "
segmentTuple_list = dataToAdd + list(segmentTuple)
command += str(tuple(segmentTuple_list))
return command
@staticmethod
def __convertSegmentsToSQLTuples(segmentList):
isFirstSegment = True
command = ""
for segment in segmentList :
for segmentTuple in segment :
if (isFirstSegment) :
isFirstSegment = False
else :
command += ", "
command += str(segmentTuple)
command += ";"
return command
def updateImageRepositoryStatus(self, freeDiskSpace, availableDiskSpace, status) :
"""
Updates the image repository's status
Args:
freeDiskSpace: the free disk space in the image repository
availableDiskSpace: the available disk space in the image repository
status: the image repository's connection status
Returns:
Nothing
"""
query = "SELECT * FROM ImageRepositoryStatus;"
result = self._executeQuery(query, True)
if (result == None) :
command = "INSERT INTO ImageRepositoryStatus VALUES (1, {0}, {1}, '{2}');".format(freeDiskSpace, availableDiskSpace, status)
else :
command = "UPDATE ImageRepositoryStatus SET freeDiskSpace = {0}, availableDiskSpace = {1}, repositoryStatus = '{2}';"\
.format(freeDiskSpace, availableDiskSpace, status)
self._executeUpdate(command)
def addNewImage(self, temporaryID, baseImageID, ownerID, imageName, imageDescription):
"""
Registers a new image in the database
Args:
            temporaryID: a temporary image ID
baseImageID: an existing image ID
ownerID: the image owner's ID
imageName: the new image's name
imageDescription: the new image's description
Returns:
Nothing
"""
baseImageData = self.getImageData(baseImageID)
update = "INSERT INTO EditedImage VALUES('{0}', {1}, {2}, '{3}', '{4}', {5}, {6}, {7}, {8}, 0);"\
.format(temporaryID, baseImageData["VanillaImageFamilyID"], -1, imageName, imageDescription,
baseImageData["OSFamily"], baseImageData["OSVariant"], ownerID, EDITION_STATE_T.TRANSFER_TO_VM_SERVER)
self._executeUpdate(update)
def editImage(self, commandID, imageID, ownerID):
"""
        Registers an image edition in the database
Args:
commandID: the image edition command's ID
imageID: the edited image's ID
ownerID: the image owner's ID
Returns:
Nothing
"""
query = "SELECT * from EditedImage WHERE imageID = {0};".format(imageID)
if (self._executeQuery(query, True) != None) :
update = "UPDATE EditedImage SET temporaryID = '{0}', state = {2} WHERE imageID = {1};".format(commandID, imageID, EDITION_STATE_T.TRANSFER_TO_VM_SERVER)
self._executeUpdate(update)
else :
imageData = self.getImageData(imageID)
update = "DELETE FROM Image WHERE imageID = {0};".format(imageID)
self._executeUpdate(update)
update = "INSERT INTO EditedImage VALUES('{0}', {1}, {2}, '{3}', '{4}', {5}, {6}, {7}, {8}, 1);"\
.format(commandID, imageData["VanillaImageFamilyID"], imageID, imageData["ImageName"], imageData["ImageDescription"],
imageData["OSFamily"], imageData["OSVariant"], ownerID, EDITION_STATE_T.TRANSFER_TO_VM_SERVER)
self._executeUpdate(update)
def moveRowToImage(self, temporaryID):
"""
Moves a row from the EditedImage table to the Image table
Args:
temporaryID: a temporary image ID
Returns:
Nothing
"""
imageData = self.getImageData(temporaryID)
update = "INSERT INTO Image VALUES ({0}, {1}, '{2}', '{3}', {4}, {5}, {6}, 1);"\
.format(imageData["ImageID"], imageData["VanillaImageFamilyID"], imageData["ImageName"], imageData["ImageDescription"],
imageData["OSFamily"], imageData["OSVariant"], int(imageData["IsBaseImage"]))
self._executeUpdate(update)
update = "DELETE FROM EditedImage WHERE temporaryID = '{0}';".format(temporaryID)
self._executeUpdate(update)
def deleteEditedImage(self, temporaryID):
"""
Deletes a new or an edited image from the database
Args:
temporaryID: a temporary image ID
Returns:
Nothing
"""
update = "DELETE FROM EditedImage WHERE temporaryID = '{0}';".format(temporaryID)
self._executeUpdate(update)
def deleteImage(self, imageID):
"""
Deletes an existing image from the database
Args:
imageID: the affected image's ID
Returns:
Nothing
"""
update = "DELETE FROM Image WHERE imageID = {0}".format(imageID)
self._executeUpdate(update)
def updateEditedImageState(self, temporaryID, newState, expectedState=None):
"""
Updates an edited image status in the database
Args:
temporaryID: the image's temporary ID
newState: the image's new state
            expectedState: the image's expected state. If it's not None, the edited image state will only
                be updated when its current state equals expectedState.
"""
if (expectedState != None) :
query = "SELECT state FROM EditedImage WHERE temporaryID = '{0}';".format(temporaryID)
if (self._executeQuery(query, True) != expectedState) :
return
update = "UPDATE EditedImage SET state = {1} WHERE temporaryID = '{0}';".format(temporaryID, newState)
self._executeUpdate(update)
def registerImageID(self, temporaryID, imageID):
"""
Registers an image ID in the database
Args:
temporaryID: the image's temporary ID
imageID: the image's ID
Returns:
Nothing
"""
update = "UPDATE EditedImage SET imageID = {1}, state = {2} WHERE temporaryID = '{0}';".format(temporaryID, imageID, EDITION_STATE_T.CHANGES_NOT_APPLIED)
self._executeUpdate(update)
def makeBootable(self, imageID):
"""
Marks an image as bootable
Args:
imageID: the affected image's ID
Returns:
Nothing
"""
update = "UPDATE Image SET isBootable = 1 WHERE imageID = {0};".format(imageID)
self._executeUpdate(update)
def __getDomainUID(self, serverName, ownerID, imageID):
query = "SELECT domainUID FROM ActiveVirtualMachines WHERE serverName = '{0}' AND ownerID = {1} AND imageID = {2};"\
.format(serverName, ownerID, imageID)
return self._executeQuery(query, True)
def unregisterDomain(self, domainUID):
"""
        Unregisters a domain in the database
Args:
domainUID: the domain's ID
Returns:
Nothing
"""
update = "DELETE FROM ActiveVirtualMachines WHERE domainUID = '{0}';".format(domainUID)
self._executeUpdate(update)
def affectsToNewOrEditedImage(self, autoDeploymentCommandID):
"""
        Checks if an auto-deployment command affects an image edition or an image creation command
Args:
autoDeploymentCommandID: the auto-deployment command's ID
Returns:
            True if the auto-deployment command affects a new or an edited image, False otherwise
"""
query = "SELECT * FROM EditedImage WHERE temporaryID = '{0}';".format(autoDeploymentCommandID)
return self._executeQuery(query, True) != None | [
"clusterEndpoint.databases.minimalClusterEndpointDBConnector.MinimalClusterEndpointDBConnector.__init__"
] | [((1459, 1547), 'clusterEndpoint.databases.minimalClusterEndpointDBConnector.MinimalClusterEndpointDBConnector.__init__', 'MinimalClusterEndpointDBConnector.__init__', (['self', 'sqlUser', 'sqlPassword', 'databaseName'], {}), '(self, sqlUser, sqlPassword,\n databaseName)\n', (1501, 1547), False, 'from clusterEndpoint.databases.minimalClusterEndpointDBConnector import MinimalClusterEndpointDBConnector\n')] |
from graphene_django import DjangoObjectType
import graphene
from graphql import GraphQLError
from graphql_jwt.decorators import login_required, superuser_required
from .models import SubAnimal_Table, Animal_Table
class SubAnimal_Info(DjangoObjectType):
class Meta:
model = SubAnimal_Table
fields = ("subAnimal", "animal", "acronym",)
class query(graphene.ObjectType):
all_SubAnimals = graphene.List(SubAnimal_Info)
def resolve_all_SubAnimals(root, info):
return SubAnimal_Table.objects.all()
class add_SubAnimal(graphene.Mutation):
new_Animal = graphene.Field(SubAnimal_Info)
class Arguments:
name = graphene.String(required=True)
animal = graphene.String(required=True)
@superuser_required
def mutate(self, info, name, animal):
parentAnimal = Animal_Table.objects.get(animal=animal)
animal = SubAnimal_Table(subAnimal=name , animal=parentAnimal)
animal.save()
return add_SubAnimal(new_Animal=animal)
class edit_SubAnimal(graphene.Mutation):
new_Animal = graphene.Field(SubAnimal_Info)
class Arguments:
name = graphene.String(required=True)
newName = graphene.String(required=True)
animal = graphene.String(required=True)
@superuser_required
def mutate(self, info, name, newName, animal):
if SubAnimal_Table.objects.filter(subAnimal=name).count() > 0:
parentAnimal = Animal_Table.objects.get(animal=animal)
animal = SubAnimal_Table.objects.get(subAnimal=name)
animal.subAnimal = newName
animal.animal = parentAnimal
animal.save()
return edit_SubAnimal(new_Animal=animal)
else:
            return GraphQLError(name + " does not exist")
class delete_SubAnimal(graphene.Mutation):
old_Animal = graphene.String()
class Arguments:
name = graphene.String(required=True)
@superuser_required
def mutate(self, info, name):
if SubAnimal_Table.objects.filter(subAnimal=name).count() > 0:
animal = SubAnimal_Table.objects.get(subAnimal=name)
animal.delete()
return delete_SubAnimal(old_Animal= name + " has been deleted")
else:
            return GraphQLError(name + " does not exist") | [
"graphene.Field",
"graphql.GraphQLError",
"graphene.List",
"graphene.String"
] | [((420, 449), 'graphene.List', 'graphene.List', (['SubAnimal_Info'], {}), '(SubAnimal_Info)\n', (433, 449), False, 'import graphene\n'), ((610, 640), 'graphene.Field', 'graphene.Field', (['SubAnimal_Info'], {}), '(SubAnimal_Info)\n', (624, 640), False, 'import graphene\n'), ((1113, 1143), 'graphene.Field', 'graphene.Field', (['SubAnimal_Info'], {}), '(SubAnimal_Info)\n', (1127, 1143), False, 'import graphene\n'), ((1909, 1926), 'graphene.String', 'graphene.String', ([], {}), '()\n', (1924, 1926), False, 'import graphene\n'), ((682, 712), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (697, 712), False, 'import graphene\n'), ((730, 760), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (745, 760), False, 'import graphene\n'), ((1185, 1215), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (1200, 1215), False, 'import graphene\n'), ((1234, 1264), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (1249, 1264), False, 'import graphene\n'), ((1282, 1312), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (1297, 1312), False, 'import graphene\n'), ((1968, 1998), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (1983, 1998), False, 'import graphene\n'), ((1805, 1842), 'graphql.GraphQLError', 'GraphQLError', (["(name + 'does not exist')"], {}), "(name + 'does not exist')\n", (1817, 1842), False, 'from graphql import GraphQLError\n'), ((2352, 2389), 'graphql.GraphQLError', 'GraphQLError', (["(name + 'does not exist')"], {}), "(name + 'does not exist')\n", (2364, 2389), False, 'from graphql import GraphQLError\n')] |
#-*- coding: utf-8 -*-
import json
import requests
import unittest
import time
import sys
import datetime
from config import *
def request_post(req_data, is_assert=True):
response = json.loads(requests.post(cli_wallet_url, data = json.dumps(req_data), headers = headers).text)
print('>> {} {}\n{}\n'.format(req_data['method'], req_data['params'], response))
if is_assert:
assert 'error' not in response
return response
#default:
# user issuer asset: AAAA
# bit asset : BBBB
def get_asset_if_not_create(symbol, is_bitasset=False):
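    # Look the asset up on the cli_wallet; if it does not exist yet, create it
    # (passing bitasset options when is_bitasset is True) and return its data.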
print('[{}] symbol: {}'.format(sys._getframe().f_code.co_name, symbol))
req_data = {
"jsonrpc": "2.0",
"method": "get_asset",
"params": [symbol],
"id":1
}
# response = json.loads(requests.post(cli_wallet_url, data = json.dumps(req_data), headers = headers).text)
response = request_post(req_data, False)
if 'error' in response:
precision = 5
asset_opts = {
"max_supply":"2100000000000000",
"market_fee_percent":0,
"max_market_fee":0,
"flags":0,
"core_exchange_rate":{
"base":{"amount":1,"asset_id":"1.3.3"},
"quote":{"amount":1,"asset_id":"1.3.0"}
},
"description":"",
"extensions":[]
}
bitasset_opts = None
if is_bitasset:
asset_opts = {
"issuer_permissions": 511,
"flags": 0,
"core_exchange_rate":{
"base":{"amount":1,"asset_id":"1.3.4"},
"quote":{"amount":1,"asset_id":"1.3.0"}
}
}
bitasset_opts = {
"new_feed_producers":[],
"feed_lifetime_sec":120
}
req_data = {
"jsonrpc": "2.0",
"method": "create_asset",
"params": [test_account, symbol, precision, asset_opts, bitasset_opts, 'true'],
"id":1
}
# response = json.loads(requests.post(cli_wallet_url, data = json.dumps(req_data), headers = headers).text)
response = request_post(req_data)
# assert 'bitasset_data_id' in response
return response['result']
return response['result']
#generate random words by suggest_brain_key
def generate_random_words():
req_data = {
"jsonrpc": "2.0",
"method": "suggest_brain_key",
"params": [],
"id":1
}
suggest_brain_key = json.loads(requests.post(cli_wallet_url, data = json.dumps(req_data), headers = headers).text)['result']
brain_key = suggest_brain_key['brain_priv_key']
return brain_key.split()
class request_unittest(unittest.TestCase):
def request_post(self, req_data):
response = json.loads(requests.post(cli_wallet_url, data = json.dumps(req_data), headers = headers).text)
print('>> {} {}\n{}\n'.format(req_data['method'], req_data['params'], response))
self.assertFalse('error' in response)
return response
def request_post_error_asset_false(self, req_data):
response = json.loads(requests.post(cli_wallet_url, data = json.dumps(req_data), headers = headers).text)
#print('>> {} {}\n{}\n'.format(req_data['method'], req_data['params'], response))
self.assertFalse('error' in response, '{} {} error'.format(req_data['method'], req_data['params']))
def request_post_error_asset_true(self, req_data):
response = json.loads(requests.post(cli_wallet_url, data = json.dumps(req_data), headers = headers).text)
self.assertTrue('error' in response, '{} {} error'.format(req_data['method'], req_data['params']))
def request_post_result_asset_false(self, req_data):
response = json.loads(requests.post(cli_wallet_url, data = json.dumps(req_data), headers = headers).text)
self.assertFalse('error' in response)
self.assertFalse(response['result'])
def request_post_result_asset_true(self, req_data):
response = json.loads(requests.post(cli_wallet_url, data = json.dumps(req_data), headers = headers).text)
self.assertFalse('error' in response)
self.assertTrue(response['result'])
def request_post_result_asset_is_none(self, req_data):
response = json.loads(requests.post(cli_wallet_url, data = json.dumps(req_data), headers = headers).text)
self.assertFalse('error' in response)
self.assertIsNone(response['result'])
def request_post_result_asset_in(self, req_data, first_or_second, is_first=True):
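        # When is_first is True, assert that first_or_second is contained in the
        # result; otherwise assert that the result is contained in first_or_second.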
response = json.loads(requests.post(cli_wallet_url, data = json.dumps(req_data), headers = headers).text)
self.assertFalse('error' in response)
if is_first:
self.assertIn(first_or_second, response['result'])
else:
self.assertIn(response['result'], first_or_second)
def request_post_result_asset_equal(self, req_data, value):
response = json.loads(requests.post(cli_wallet_url, data = json.dumps(req_data), headers = headers).text)
self.assertFalse('error' in response)
self.assertEqual(value, response['result'])
def datetime_N_ago(n):
n_ago = (datetime.datetime.now() - datetime.timedelta(days = n))
# return n_ago.strftime("%Y-%m-%d %H:%M:%S")
return n_ago
| [
"datetime.datetime.now",
"datetime.timedelta",
"json.dumps",
"sys._getframe"
] | [((5364, 5387), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5385, 5387), False, 'import datetime\n'), ((5390, 5416), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'n'}), '(days=n)\n', (5408, 5416), False, 'import datetime\n'), ((252, 272), 'json.dumps', 'json.dumps', (['req_data'], {}), '(req_data)\n', (262, 272), False, 'import json\n'), ((625, 640), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (638, 640), False, 'import sys\n'), ((2660, 2680), 'json.dumps', 'json.dumps', (['req_data'], {}), '(req_data)\n', (2670, 2680), False, 'import json\n'), ((2955, 2975), 'json.dumps', 'json.dumps', (['req_data'], {}), '(req_data)\n', (2965, 2975), False, 'import json\n'), ((3291, 3311), 'json.dumps', 'json.dumps', (['req_data'], {}), '(req_data)\n', (3301, 3311), False, 'import json\n'), ((3664, 3684), 'json.dumps', 'json.dumps', (['req_data'], {}), '(req_data)\n', (3674, 3684), False, 'import json\n'), ((3947, 3967), 'json.dumps', 'json.dumps', (['req_data'], {}), '(req_data)\n', (3957, 3967), False, 'import json\n'), ((4214, 4234), 'json.dumps', 'json.dumps', (['req_data'], {}), '(req_data)\n', (4224, 4234), False, 'import json\n'), ((4483, 4503), 'json.dumps', 'json.dumps', (['req_data'], {}), '(req_data)\n', (4493, 4503), False, 'import json\n'), ((4781, 4801), 'json.dumps', 'json.dumps', (['req_data'], {}), '(req_data)\n', (4791, 4801), False, 'import json\n'), ((5175, 5195), 'json.dumps', 'json.dumps', (['req_data'], {}), '(req_data)\n', (5185, 5195), False, 'import json\n')] |
###############################################################################
#
# Provides the required elements to implement a binary convolutional network in
# PyTorch.
#
# This file contains the following elements are implemented:
# * BinaryLinear
# * BinaryConv2d
# * sign function with straight-through estimator gradient
# * Binary optimization algorithm
#
# Inspiration taken from:
# https://github.com/itayhubara/BinaryNet.pytorch/blob/master/models/binarized_modules.py
#
# Author(s): <NAME>
###############################################################################
from typing import TypeVar, Union, Tuple, Optional, Callable
import torch
import torch.nn as nn
import torch.nn.functional as f
from torch import Tensor
from torch.autograd import Function
from torch.optim.optimizer import Optimizer
from torch.optim import Adam
################################################################################
# taken from https://github.com/pytorch/pytorch/blob/bfeff1eb8f90aa1ff7e4f6bafe9945ad409e2d97/torch/nn/common_types.pyi
T = TypeVar("T")
_scalar_or_tuple_2_t = Union[T, Tuple[T, T]]
_size_2_t = _scalar_or_tuple_2_t[int]
################################################################################
# Quantizers
class Binarize(Function):
clip_value = 1
@staticmethod
def forward(ctx, inp):
ctx.save_for_backward(inp)
output = inp.sign()
return output
@staticmethod
def backward(ctx, grad_output):
inp: Tensor = ctx.saved_tensors[0]
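        # Straight-through estimator: pass the gradient through unchanged where
        # |input| <= clip_value and block it everywhere else.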
clipped = inp.abs() <= Binarize.clip_value
output = torch.zeros(inp.size()).to(grad_output.device)
output[clipped] = 1
output[~clipped] = 0
return output * grad_output
binarize = Binarize.apply
################################################################################
# Optimizers for binary networks
class MomentumWithThresholdBinaryOptimizer(Optimizer):
def __init__(
self,
binary_params,
bn_params,
ar: float = 0.0001,
threshold: float = 0,
adam_lr=0.001,
):
if not 0 < ar < 1:
raise ValueError(
"given adaptivity rate {} is invalid; should be in (0, 1) (excluding endpoints)".format(
ar
)
)
if threshold < 0:
raise ValueError(
"given threshold {} is invalid; should be > 0".format(threshold)
)
self.total_weights = {}
self._adam = Adam(bn_params, lr=adam_lr)
defaults = dict(adaptivity_rate=ar, threshold=threshold)
super(MomentumWithThresholdBinaryOptimizer, self).__init__(
binary_params, defaults
)
def step(self, closure: Optional[Callable[[], float]] = ..., ar=None):
self._adam.step()
flips = {None}
for group in self.param_groups:
params = group["params"]
y = group["adaptivity_rate"]
t = group["threshold"]
flips = {}
if ar is not None:
y = ar
for param_idx, p in enumerate(params):
grad = p.grad.data
state = self.state[p]
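                # Exponential moving average of the gradient, initialised with the
                # current gradient on the first step.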
if "moving_average" not in state:
m = state["moving_average"] = torch.clone(grad).detach()
else:
m: Tensor = state["moving_average"]
m.mul_((1 - y))
m.add_(grad.mul(y))
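                # Flip a weight when its gradient momentum exceeds the threshold and
                # agrees in sign with the weight; mask is -1 for flipped entries, 1 otherwise.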
mask = (m.abs() >= t) * (m.sign() == p.sign())
mask = mask.double() * -1
mask[mask == 0] = 1
flips[param_idx] = (mask == -1).sum().item()
p.data.mul_(mask)
return flips
def zero_grad(self) -> None:
super().zero_grad()
self._adam.zero_grad()
################################################################################
# binary torch layers
class BinaryLinear(nn.Linear):
def __init__(
self,
in_features: int,
out_features: int,
bias=False,
keep_latent_weight=False,
binarize_input=False,
):
super().__init__(in_features, out_features, bias=bias)
self.keep_latent_weight = keep_latent_weight
self.binarize_input = binarize_input
if not self.keep_latent_weight:
with torch.no_grad():
self.weight.data.sign_()
self.bias.data.sign_() if self.bias is not None else None
def forward(self, inp: Tensor) -> Tensor:
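        # When latent weights are kept, binarize them on the fly; otherwise the
        # stored weights were already signed in __init__. The bias and (optionally)
        # the inputs are binarized as well.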
if self.keep_latent_weight:
weight = binarize(self.weight)
else:
weight = self.weight
bias = self.bias if self.bias is None else binarize(self.bias)
if self.binarize_input:
inp = binarize(inp)
return f.linear(inp, weight, bias)
class BinaryConv2d(nn.Conv2d):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride=1,
padding=1,
bias=False,
keep_latent_weight=False,
binarize_input=False,
):
super().__init__(
in_channels, out_channels, kernel_size, stride, padding, bias=bias
)
self.keep_latent_weight = keep_latent_weight
self.binarize_input = binarize_input
if not self.keep_latent_weight:
with torch.no_grad():
self.weight.data.sign_()
self.bias.data.sign_() if self.bias is not None else None
def forward(self, inp: Tensor) -> Tensor:
if self.keep_latent_weight:
weight = binarize(self.weight)
else:
weight = self.weight
bias = self.bias if self.bias is None else binarize(self.bias)
if self.binarize_input:
inp = binarize(inp)
return f.conv2d(
inp, weight, bias, self.stride, self.padding, self.dilation, self.groups
)
| [
"torch.nn.functional.linear",
"torch.optim.Adam",
"torch.nn.functional.conv2d",
"torch.clone",
"torch.no_grad",
"typing.TypeVar"
] | [((1056, 1068), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (1063, 1068), False, 'from typing import TypeVar, Union, Tuple, Optional, Callable\n'), ((2519, 2546), 'torch.optim.Adam', 'Adam', (['bn_params'], {'lr': 'adam_lr'}), '(bn_params, lr=adam_lr)\n', (2523, 2546), False, 'from torch.optim import Adam\n'), ((4840, 4867), 'torch.nn.functional.linear', 'f.linear', (['inp', 'weight', 'bias'], {}), '(inp, weight, bias)\n', (4848, 4867), True, 'import torch.nn.functional as f\n'), ((5876, 5963), 'torch.nn.functional.conv2d', 'f.conv2d', (['inp', 'weight', 'bias', 'self.stride', 'self.padding', 'self.dilation', 'self.groups'], {}), '(inp, weight, bias, self.stride, self.padding, self.dilation, self.\n groups)\n', (5884, 5963), True, 'import torch.nn.functional as f\n'), ((4382, 4397), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4395, 4397), False, 'import torch\n'), ((5418, 5433), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5431, 5433), False, 'import torch\n'), ((3312, 3329), 'torch.clone', 'torch.clone', (['grad'], {}), '(grad)\n', (3323, 3329), False, 'import torch\n')] |
import bpy
import os
import os.path
from pathlib import Path
def _mkdir(path):
try:
os.mkdir(path)
except:
pass
def path_components(path):
retval = []
base, end = os.path.split(path)
while end != '':
retval.insert(0, end)
base, end = os.path.split(base)
return retval
def find_project_root():
if bpy.data.filepath == '':
return None
path = os.path.split(bpy.data.filepath)
test_path = os.path.join(path[0], '.hecl')
while not os.path.exists(test_path):
path = os.path.split(path[0])
test_path = os.path.join(path[0], '.hecl')
if os.path.exists(test_path):
return path[0]
return None
def get_patching_dirs(make_dirs=False):
proj_root = find_project_root()
if not proj_root:
return None, None
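    # Mirror the .blend file's project-relative path under <project>/.hecl/patches,
    # returning (None, <patches root>) when a directory is missing and make_dirs is False.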
rel_to_blend = os.path.relpath(bpy.data.filepath, start=proj_root)
rel_to_blend_comps = path_components(rel_to_blend)
trace_dir = os.path.join(proj_root, '.hecl', 'patches')
global_out = trace_dir
if not make_dirs and not os.path.exists(trace_dir):
return None, global_out
_mkdir(trace_dir)
for comp in rel_to_blend_comps:
ext_pair = os.path.splitext(comp)
if ext_pair[1] == '.blend':
trace_dir = os.path.join(trace_dir, ext_pair[0])
if not make_dirs and not os.path.exists(trace_dir):
return None, global_out
_mkdir(trace_dir)
return trace_dir, global_out
trace_dir = os.path.join(trace_dir, comp)
if not make_dirs and not os.path.exists(trace_dir):
return None, global_out
_mkdir(trace_dir)
class FILE_OT_hecl_patching_save(bpy.types.Operator):
'''Save text datablocks to hecl patching directory'''
bl_idname = "file.hecl_patching_save"
bl_label = "Save HECL Patches"
bl_options = {'REGISTER'}
def execute(self, context):
patching_dir, global_dir = get_patching_dirs(make_dirs=True)
if not patching_dir:
self.report({'WARNING'}, 'Unable to save patches for ' + bpy.data.filepath)
return {'CANCELLED'}
count = 0
for text in bpy.data.texts:
if not text.name.endswith('.py') or text.name.startswith('g_'):
continue
text_abspath = os.path.join(patching_dir, text.name)
text_file = open(text_abspath, 'w')
text_file.write(text.as_string())
text_file.close()
count += 1
if count == 1:
self.report({'INFO'}, 'saved 1 patch')
else:
self.report({'INFO'}, 'saved %d patches' % count)
return {'FINISHED'}
class FILE_OT_hecl_patching_load(bpy.types.Operator):
'''Load text datablocks from hecl patching directory'''
bl_idname = "file.hecl_patching_load"
bl_label = "Load HECL Patches"
bl_options = {'REGISTER'}
def execute(self, context):
patching_dir, global_dir = get_patching_dirs()
count = 0
# Locals
if patching_dir:
p = Path(patching_dir)
for path in p.glob('*.py'):
path = path.name
text_abspath = os.path.join(patching_dir, path)
text_file = open(text_abspath, 'r')
if path in bpy.data.texts:
text = bpy.data.texts[path]
else:
text = bpy.data.texts.new(path)
text.from_string(text_file.read())
text_file.close()
count += 1
# Globals
if global_dir:
p = Path(global_dir)
for path in p.glob('g_*.py'):
path = path.name
text_abspath = os.path.join(global_dir, path)
text_file = open(text_abspath, 'r')
if path in bpy.data.texts:
text = bpy.data.texts[path]
else:
text = bpy.data.texts.new(path)
text.from_string(text_file.read())
text_file.close()
count += 1
if count == 1:
self.report({'INFO'}, 'loaded 1 patch')
else:
self.report({'INFO'}, 'loaded %d patches' % count)
return {'FINISHED'}
def save_func(self, context):
self.layout.operator("file.hecl_patching_save", text="Save HECL Patches")
def load_func(self, context):
self.layout.operator("file.hecl_patching_load", text="Load HECL Patches")
def register():
bpy.utils.register_class(FILE_OT_hecl_patching_save)
bpy.utils.register_class(FILE_OT_hecl_patching_load)
bpy.types.TOPBAR_MT_file_external_data.append(load_func)
bpy.types.TOPBAR_MT_file_external_data.append(save_func)
def unregister():
bpy.utils.unregister_class(FILE_OT_hecl_patching_save)
bpy.utils.unregister_class(FILE_OT_hecl_patching_load)
bpy.types.TOPBAR_MT_file_external_data.remove(load_func)
bpy.types.TOPBAR_MT_file_external_data.remove(save_func)
| [
"bpy.utils.unregister_class",
"os.path.exists",
"bpy.types.TOPBAR_MT_file_external_data.append",
"bpy.types.TOPBAR_MT_file_external_data.remove",
"pathlib.Path",
"os.path.join",
"os.path.splitext",
"os.path.split",
"os.mkdir",
"bpy.data.texts.new",
"bpy.utils.register_class",
"os.path.relpath"
] | [((197, 216), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (210, 216), False, 'import os\n'), ((415, 447), 'os.path.split', 'os.path.split', (['bpy.data.filepath'], {}), '(bpy.data.filepath)\n', (428, 447), False, 'import os\n'), ((464, 494), 'os.path.join', 'os.path.join', (['path[0]', '""".hecl"""'], {}), "(path[0], '.hecl')\n", (476, 494), False, 'import os\n'), ((632, 657), 'os.path.exists', 'os.path.exists', (['test_path'], {}), '(test_path)\n', (646, 657), False, 'import os\n'), ((842, 893), 'os.path.relpath', 'os.path.relpath', (['bpy.data.filepath'], {'start': 'proj_root'}), '(bpy.data.filepath, start=proj_root)\n', (857, 893), False, 'import os\n'), ((965, 1008), 'os.path.join', 'os.path.join', (['proj_root', '""".hecl"""', '"""patches"""'], {}), "(proj_root, '.hecl', 'patches')\n", (977, 1008), False, 'import os\n'), ((4518, 4570), 'bpy.utils.register_class', 'bpy.utils.register_class', (['FILE_OT_hecl_patching_save'], {}), '(FILE_OT_hecl_patching_save)\n', (4542, 4570), False, 'import bpy\n'), ((4575, 4627), 'bpy.utils.register_class', 'bpy.utils.register_class', (['FILE_OT_hecl_patching_load'], {}), '(FILE_OT_hecl_patching_load)\n', (4599, 4627), False, 'import bpy\n'), ((4632, 4688), 'bpy.types.TOPBAR_MT_file_external_data.append', 'bpy.types.TOPBAR_MT_file_external_data.append', (['load_func'], {}), '(load_func)\n', (4677, 4688), False, 'import bpy\n'), ((4693, 4749), 'bpy.types.TOPBAR_MT_file_external_data.append', 'bpy.types.TOPBAR_MT_file_external_data.append', (['save_func'], {}), '(save_func)\n', (4738, 4749), False, 'import bpy\n'), ((4773, 4827), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['FILE_OT_hecl_patching_save'], {}), '(FILE_OT_hecl_patching_save)\n', (4799, 4827), False, 'import bpy\n'), ((4832, 4886), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['FILE_OT_hecl_patching_load'], {}), '(FILE_OT_hecl_patching_load)\n', (4858, 4886), False, 'import bpy\n'), ((4891, 4947), 'bpy.types.TOPBAR_MT_file_external_data.remove', 'bpy.types.TOPBAR_MT_file_external_data.remove', (['load_func'], {}), '(load_func)\n', (4936, 4947), False, 'import bpy\n'), ((4952, 5008), 'bpy.types.TOPBAR_MT_file_external_data.remove', 'bpy.types.TOPBAR_MT_file_external_data.remove', (['save_func'], {}), '(save_func)\n', (4997, 5008), False, 'import bpy\n'), ((97, 111), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (105, 111), False, 'import os\n'), ((288, 307), 'os.path.split', 'os.path.split', (['base'], {}), '(base)\n', (301, 307), False, 'import os\n'), ((509, 534), 'os.path.exists', 'os.path.exists', (['test_path'], {}), '(test_path)\n', (523, 534), False, 'import os\n'), ((551, 573), 'os.path.split', 'os.path.split', (['path[0]'], {}), '(path[0])\n', (564, 573), False, 'import os\n'), ((594, 624), 'os.path.join', 'os.path.join', (['path[0]', '""".hecl"""'], {}), "(path[0], '.hecl')\n", (606, 624), False, 'import os\n'), ((1201, 1223), 'os.path.splitext', 'os.path.splitext', (['comp'], {}), '(comp)\n', (1217, 1223), False, 'import os\n'), ((1516, 1545), 'os.path.join', 'os.path.join', (['trace_dir', 'comp'], {}), '(trace_dir, comp)\n', (1528, 1545), False, 'import os\n'), ((1065, 1090), 'os.path.exists', 'os.path.exists', (['trace_dir'], {}), '(trace_dir)\n', (1079, 1090), False, 'import os\n'), ((1284, 1320), 'os.path.join', 'os.path.join', (['trace_dir', 'ext_pair[0]'], {}), '(trace_dir, ext_pair[0])\n', (1296, 1320), False, 'import os\n'), ((2322, 2359), 'os.path.join', 'os.path.join', (['patching_dir', 'text.name'], {}), 
'(patching_dir, text.name)\n', (2334, 2359), False, 'import os\n'), ((3072, 3090), 'pathlib.Path', 'Path', (['patching_dir'], {}), '(patching_dir)\n', (3076, 3090), False, 'from pathlib import Path\n'), ((3615, 3631), 'pathlib.Path', 'Path', (['global_dir'], {}), '(global_dir)\n', (3619, 3631), False, 'from pathlib import Path\n'), ((1579, 1604), 'os.path.exists', 'os.path.exists', (['trace_dir'], {}), '(trace_dir)\n', (1593, 1604), False, 'import os\n'), ((3195, 3227), 'os.path.join', 'os.path.join', (['patching_dir', 'path'], {}), '(patching_dir, path)\n', (3207, 3227), False, 'import os\n'), ((3738, 3768), 'os.path.join', 'os.path.join', (['global_dir', 'path'], {}), '(global_dir, path)\n', (3750, 3768), False, 'import os\n'), ((1358, 1383), 'os.path.exists', 'os.path.exists', (['trace_dir'], {}), '(trace_dir)\n', (1372, 1383), False, 'import os\n'), ((3420, 3444), 'bpy.data.texts.new', 'bpy.data.texts.new', (['path'], {}), '(path)\n', (3438, 3444), False, 'import bpy\n'), ((3961, 3985), 'bpy.data.texts.new', 'bpy.data.texts.new', (['path'], {}), '(path)\n', (3979, 3985), False, 'import bpy\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# note that this needs the notmuch python bindings. For more info see:
# http://notmuchmail.org/howto/#index4h2
import notmuch
from i3pystatus.mail import Backend
class Notmuch(Backend):
"""
This class uses the notmuch python bindings to check for the
number of messages in the notmuch database with the tags "inbox"
and "unread"
"""
settings = required = ("db_path",)
def init(self):
self.db = notmuch.Database(self.db_path)
@property
def unread(self):
return notmuch.Query(self.db, "tag:unread and tag:inbox").count_messages()
Backend = Notmuch
| [
"notmuch.Database",
"notmuch.Query"
] | [((483, 513), 'notmuch.Database', 'notmuch.Database', (['self.db_path'], {}), '(self.db_path)\n', (499, 513), False, 'import notmuch\n'), ((566, 616), 'notmuch.Query', 'notmuch.Query', (['self.db', '"""tag:unread and tag:inbox"""'], {}), "(self.db, 'tag:unread and tag:inbox')\n", (579, 616), False, 'import notmuch\n')] |
import copy
import time
import threading
from queue import Queue
import logging
import functions.setting.setting_utils as su
from . import chunk_image_seq
class FillPatches(threading.Thread):
def __init__(self,
setting,
batch_size=None,
max_queue_size=None,
stage_sequence=None,
full_image=None,
chunk_length_force_to_multiple_of=None
):
if batch_size is None:
batch_size = setting['NetworkValidation']['BatchSize']
if max_queue_size is None:
max_queue_size = setting['NetworkValidation']['MaxQueueSize']
if stage_sequence is None:
stage_sequence = setting['stage_sequence']
if full_image is None:
full_image = setting['FullImage']
if chunk_length_force_to_multiple_of is None:
chunk_length_force_to_multiple_of = setting['NetworkValidation']['ChunkLengthForceToMultipleOf']
threading.Thread.__init__(self)
self.paused = False
self.pause_cond = threading.Condition(threading.Lock())
self.daemon = True
self._batch_size = batch_size
self._chunks_completed = False
self._PatchQueue = Queue(maxsize=max_queue_size)
im_list_info = su.get_im_info_list_from_train_mode(setting, train_mode='Validation')
self._reading = chunk_image_seq.Images(setting=setting,
class_mode='Direct',
number_of_images_per_chunk=setting['NetworkValidation']['NumberOfImagesPerChunk'],
samples_per_image=setting['NetworkValidation']['SamplesPerImage'],
im_info_list_full=im_list_info,
stage_sequence=stage_sequence,
train_mode='Validation',
full_image=full_image,
chunk_length_force_to_multiple_of=chunk_length_force_to_multiple_of)
self._reading.fill()
def run(self):
while True:
with self.pause_cond:
while self.paused:
self.pause_cond.wait()
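                # Fetch the next validation batch and enqueue it together with a copy
                # of the chunk-completion flag; pause the producer once the queue is full.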
try:
time_before_put = time.time()
item_queue = self._reading.next_batch(self._batch_size) + (copy.copy(self._reading._chunks_completed),)
self._PatchQueue.put(item_queue)
time_after_put = time.time()
logging.debug('ValQueue: put {:.2f} s'.format(time_after_put - time_before_put))
if self._reading._chunks_completed:
logging.debug('ValQueue: chunk is completed: resetValidation() ')
self._chunks_completed = True
self._reading.reset_validation()
if self._PatchQueue.full():
self.pause()
finally:
time.sleep(0.1)
def pause(self):
if not self.paused:
self.paused = True
# If in sleep, we acquire immediately, otherwise we wait for thread
# to release condition. In race, worker will still see self.paused
# and begin waiting until it's set back to False
self.pause_cond.acquire()
def resume(self):
if self.paused:
self.paused = False
# Notify so thread will wake after lock released
self.pause_cond.notify()
# Now release the lock
self.pause_cond.release()
| [
"threading.Thread.__init__",
"logging.debug",
"threading.Lock",
"time.sleep",
"copy.copy",
"queue.Queue",
"time.time",
"functions.setting.setting_utils.get_im_info_list_from_train_mode"
] | [((1008, 1039), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (1033, 1039), False, 'import threading\n'), ((1263, 1292), 'queue.Queue', 'Queue', ([], {'maxsize': 'max_queue_size'}), '(maxsize=max_queue_size)\n', (1268, 1292), False, 'from queue import Queue\n'), ((1316, 1385), 'functions.setting.setting_utils.get_im_info_list_from_train_mode', 'su.get_im_info_list_from_train_mode', (['setting'], {'train_mode': '"""Validation"""'}), "(setting, train_mode='Validation')\n", (1351, 1385), True, 'import functions.setting.setting_utils as su\n'), ((1114, 1130), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1128, 1130), False, 'import threading\n'), ((2417, 2428), 'time.time', 'time.time', ([], {}), '()\n', (2426, 2428), False, 'import time\n'), ((2643, 2654), 'time.time', 'time.time', ([], {}), '()\n', (2652, 2654), False, 'import time\n'), ((3143, 3158), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3153, 3158), False, 'import time\n'), ((2836, 2901), 'logging.debug', 'logging.debug', (['"""ValQueue: chunk is completed: resetValidation() """'], {}), "('ValQueue: chunk is completed: resetValidation() ')\n", (2849, 2901), False, 'import logging\n'), ((2508, 2550), 'copy.copy', 'copy.copy', (['self._reading._chunks_completed'], {}), '(self._reading._chunks_completed)\n', (2517, 2550), False, 'import copy\n')] |
#####################################################################################
# MIT License #
# #
# Copyright (C) 2019 <NAME> #
# #
# This file is part of VQ-VAE-Speech. #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
#####################################################################################
from experiments.device_configuration import DeviceConfiguration
from experiments.pipeline_factory import PipelineFactory
from error_handling.console_logger import ConsoleLogger
import os
import yaml
import copy
class Experiment(object):
def __init__(self, name, experiments_path, results_path, global_configuration,
experiment_configuration, seed):
self._name = name
self._experiments_path = experiments_path
self._results_path = results_path
self._global_configuration = global_configuration
self._experiment_configuration = experiment_configuration
self._seed = seed
# Create the results path directory if it doesn't exist
if not os.path.isdir(results_path):
ConsoleLogger.status('Creating results directory at path: {}'.format(results_path))
os.mkdir(results_path)
else:
ConsoleLogger.status('Results directory already created at path: {}'.format(results_path))
# Create the experiments path directory if it doesn't exist
if not os.path.isdir(experiments_path):
ConsoleLogger.status('Creating experiments directory at path: {}'.format(experiments_path))
os.mkdir(experiments_path)
else:
ConsoleLogger.status('Experiments directory already created at path: {}'.format(experiments_path))
experiments_configuration_path = experiments_path + os.sep + name + '_configuration.yaml'
configuration_file_already_exists = True if os.path.isfile(experiments_configuration_path) else False
if not configuration_file_already_exists:
self._device_configuration = DeviceConfiguration.load_from_configuration(global_configuration)
# Create a new configuration state from the default and the experiment specific aspects
self._configuration = copy.deepcopy(self._global_configuration)
for experiment_key in experiment_configuration.keys():
if experiment_key in self._configuration:
self._configuration[experiment_key] = experiment_configuration[experiment_key]
# Save the configuration of the experiments
with open(experiments_configuration_path, 'w') as file:
yaml.dump(self._configuration, file)
else:
with open(experiments_configuration_path, 'r') as file:
self._configuration = yaml.load(file, Loader=yaml.FullLoader)
self._device_configuration = DeviceConfiguration.load_from_configuration(self._configuration)
if configuration_file_already_exists:
self._trainer, self._evaluator, self._configuration, self._device_configuration = \
PipelineFactory.load(self._experiments_path, self._name, self._results_path)
else:
self._trainer, self._evaluator = PipelineFactory.build(self._configuration,
self._device_configuration, self._experiments_path, self._name, self._results_path)
@property
def device_configuration(self):
return self._device_configuration
@property
def experiment_path(self):
return self._experiments_path
@property
def name(self):
return self._name
@property
def seed(self):
return self._seed
@property
def results_path(self):
return self._results_path
@property
def configuration(self):
return self._experiment_configuration
def train(self):
ConsoleLogger.status("Running the experiment called '{}'".format(self._name))
ConsoleLogger.status('Begins to train the model')
self._trainer.train()
        ConsoleLogger.success("Succeeded in running the experiment called '{}'".format(self._name))
def evaluate(self, evaluation_options):
ConsoleLogger.status("Running the experiment called '{}'".format(self._name))
ConsoleLogger.status('Begins to evaluate the model')
self._evaluator.evaluate(evaluation_options)
        ConsoleLogger.success("Succeeded in running the experiment called '{}'".format(self._name))
| [
"experiments.device_configuration.DeviceConfiguration.load_from_configuration",
"experiments.pipeline_factory.PipelineFactory.load",
"yaml.dump",
"yaml.load",
"os.path.isfile",
"os.path.isdir",
"os.mkdir",
"error_handling.console_logger.ConsoleLogger.status",
"copy.deepcopy",
"experiments.pipeline_factory.PipelineFactory.build"
] | [((5780, 5829), 'error_handling.console_logger.ConsoleLogger.status', 'ConsoleLogger.status', (['"""Begins to train the model"""'], {}), "('Begins to train the model')\n", (5800, 5829), False, 'from error_handling.console_logger import ConsoleLogger\n'), ((6096, 6148), 'error_handling.console_logger.ConsoleLogger.status', 'ConsoleLogger.status', (['"""Begins to evaluate the model"""'], {}), "('Begins to evaluate the model')\n", (6116, 6148), False, 'from error_handling.console_logger import ConsoleLogger\n'), ((2883, 2910), 'os.path.isdir', 'os.path.isdir', (['results_path'], {}), '(results_path)\n', (2896, 2910), False, 'import os\n'), ((3020, 3042), 'os.mkdir', 'os.mkdir', (['results_path'], {}), '(results_path)\n', (3028, 3042), False, 'import os\n'), ((3244, 3275), 'os.path.isdir', 'os.path.isdir', (['experiments_path'], {}), '(experiments_path)\n', (3257, 3275), False, 'import os\n'), ((3393, 3419), 'os.mkdir', 'os.mkdir', (['experiments_path'], {}), '(experiments_path)\n', (3401, 3419), False, 'import os\n'), ((3696, 3742), 'os.path.isfile', 'os.path.isfile', (['experiments_configuration_path'], {}), '(experiments_configuration_path)\n', (3710, 3742), False, 'import os\n'), ((3845, 3910), 'experiments.device_configuration.DeviceConfiguration.load_from_configuration', 'DeviceConfiguration.load_from_configuration', (['global_configuration'], {}), '(global_configuration)\n', (3888, 3910), False, 'from experiments.device_configuration import DeviceConfiguration\n'), ((4046, 4087), 'copy.deepcopy', 'copy.deepcopy', (['self._global_configuration'], {}), '(self._global_configuration)\n', (4059, 4087), False, 'import copy\n'), ((4919, 4995), 'experiments.pipeline_factory.PipelineFactory.load', 'PipelineFactory.load', (['self._experiments_path', 'self._name', 'self._results_path'], {}), '(self._experiments_path, self._name, self._results_path)\n', (4939, 4995), False, 'from experiments.pipeline_factory import PipelineFactory\n'), ((5055, 5186), 'experiments.pipeline_factory.PipelineFactory.build', 'PipelineFactory.build', (['self._configuration', 'self._device_configuration', 'self._experiments_path', 'self._name', 'self._results_path'], {}), '(self._configuration, self._device_configuration, self\n ._experiments_path, self._name, self._results_path)\n', (5076, 5186), False, 'from experiments.pipeline_factory import PipelineFactory\n'), ((4453, 4489), 'yaml.dump', 'yaml.dump', (['self._configuration', 'file'], {}), '(self._configuration, file)\n', (4462, 4489), False, 'import yaml\n'), ((4610, 4649), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'yaml.FullLoader'}), '(file, Loader=yaml.FullLoader)\n', (4619, 4649), False, 'import yaml\n'), ((4695, 4759), 'experiments.device_configuration.DeviceConfiguration.load_from_configuration', 'DeviceConfiguration.load_from_configuration', (['self._configuration'], {}), '(self._configuration)\n', (4738, 4759), False, 'from experiments.device_configuration import DeviceConfiguration\n')] |
#!/usr/bin/env python
# Beware:
# - this script is executed using the system's python, so with not easy control on which
# packages are available. Same, we cannot directly install new ones using pip.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
####################################################################################################
# Default settings
settings = {
'requires_root': True,
}
####################################################################################################
# Imports
import os
import subprocess
import sys
# Do *not* use optparse or argparse here, we are not sure on which version of python we are!
####################################################################################################
# Utility functions
####################################################################################################
####################################################################################################
# Color terminal
class bcolors(object):
HEADER = '\033[95m'
OKBLUE = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
BOOT = '\033[94m'
ENDC = '\033[0m'
# Do *not* use color when:
# - on windows
# - not in a terminal except if we are in Travis CI
if sys.platform.startswith('win32') or (not os.environ.get("TRAVIS") and not sys.stdout.isatty()):
bcolors.HEADER = ''
bcolors.OKBLUE = ''
bcolors.OKGREEN = ''
bcolors.WARNING = ''
bcolors.FAIL = ''
bcolors.BOLD = ''
bcolors.UNDERLINE = ''
bcolors.ENDC = ''
####################################################################################################
# Log functions
def flush():
sys.stdout.flush()
sys.stderr.flush()
def printNewSection(char):
print(bcolors.OKBLUE + char * 79)
flush()
def printInfo(text):
print(bcolors.OKBLUE + bcolors.OKBLUE + "[INFO ] " + bcolors.ENDC + text)
flush()
def printError(text):
print(bcolors.FAIL + "[ERROR ] " + bcolors.ENDC + text, file=sys.stderr)
flush()
def printCmd(text):
print(bcolors.OKGREEN + "[CMD ] " + bcolors.ENDC + text)
flush()
def printRootCmd(text):
print(bcolors.OKGREEN + "[ROOT CMD] " + bcolors.ENDC + text)
flush()
def printCmdBg(text):
print(bcolors.OKGREEN + "[CMD (bg)] " + bcolors.ENDC + text)
flush()
def printDetail(text):
print(bcolors.HEADER + "[DETAIL ] " + bcolors.ENDC + text)
flush()
def run(cmd, cwd=None, shell=False):
printCmd("{0}".format(" ".join(cmd)))
subprocess.check_call(cmd, shell=shell, cwd=cwd)
def runAsRoot(cmd, cwd=None, shell=False):
if os.geteuid() != 0:
cmd = ['sudo'] + cmd
printRootCmd("{0}".format(" ".join(cmd)))
else:
printCmd("(already root) {0}".format(" ".join(cmd)))
subprocess.check_call(cmd, shell=shell, cwd=cwd)
####################################################################################################
# run external tools methods
def call(cmd, cwd=None, shell=False):
printCmd("{0}".format(" ".join(cmd)))
return subprocess.call(cmd, shell=shell, cwd=cwd)
def runBackground(cmd, cwd=None, shell=False):
printCmdBg(" ".join(cmd))
subprocess.Popen(cmd, cwd=cwd, shell=shell)
def runAsRootIfNeeded(cmd, cwd=None, shell=False):
if settings['requires_root']:
runAsRoot(cmd, cwd=cwd, shell=shell)
else:
run(cmd, cwd=cwd, shell=shell)
####################################################################################################
# print usage to the user
def usage():
print("Usage: ./install/install.sh [-l|-d|-b|-h]")
print("")
print("Uninstall with './install/uninstall.py'")
####################################################################################################
# parse command line
action = "none"
if len(sys.argv) > 1:
args = sys.argv[:]
while args:
executable = args.pop(0)
cmd = args.pop(0)
if cmd == "-h":
usage()
sys.exit(0)
else:
raise Exception("Invalid parameter: {!r}".format(cmd))
else:
action = "default_install"
if sys.version_info < (2, 6):
    raise Exception("must use python 2.7.x. Current version is: {}.".format(sys.version_info))
####################################################################################################
# execute actions
printNewSection("=")
printInfo("Installing ppb command line tool")
root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
src_path = os.path.join(root_path, "src")
printDetail("Installation source: {0}".format(src_path))
runAsRootIfNeeded(["pip", "install", "-e", src_path])
printInfo("Installation complete")
printNewSection("=")
| [
"subprocess.check_call",
"subprocess.Popen",
"sys.stderr.flush",
"os.path.join",
"sys.platform.startswith",
"os.geteuid",
"os.environ.get",
"os.path.dirname",
"sys.stdout.isatty",
"subprocess.call",
"sys.exit",
"sys.stdout.flush"
] | [((4674, 4704), 'os.path.join', 'os.path.join', (['root_path', '"""src"""'], {}), "(root_path, 'src')\n", (4686, 4704), False, 'import os\n'), ((1402, 1434), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win32"""'], {}), "('win32')\n", (1425, 1434), False, 'import sys\n'), ((1827, 1845), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1843, 1845), False, 'import sys\n'), ((1850, 1868), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (1866, 1868), False, 'import sys\n'), ((2668, 2716), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {'shell': 'shell', 'cwd': 'cwd'}), '(cmd, shell=shell, cwd=cwd)\n', (2689, 2716), False, 'import subprocess\n'), ((2942, 2990), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {'shell': 'shell', 'cwd': 'cwd'}), '(cmd, shell=shell, cwd=cwd)\n', (2963, 2990), False, 'import subprocess\n'), ((3215, 3257), 'subprocess.call', 'subprocess.call', (['cmd'], {'shell': 'shell', 'cwd': 'cwd'}), '(cmd, shell=shell, cwd=cwd)\n', (3230, 3257), False, 'import subprocess\n'), ((3341, 3384), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'cwd': 'cwd', 'shell': 'shell'}), '(cmd, cwd=cwd, shell=shell)\n', (3357, 3384), False, 'import subprocess\n'), ((2769, 2781), 'os.geteuid', 'os.geteuid', ([], {}), '()\n', (2779, 2781), False, 'import os\n'), ((4624, 4649), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4639, 4649), False, 'import os\n'), ((1443, 1467), 'os.environ.get', 'os.environ.get', (['"""TRAVIS"""'], {}), "('TRAVIS')\n", (1457, 1467), False, 'import os\n'), ((1476, 1495), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (1493, 1495), False, 'import sys\n'), ((4148, 4159), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4156, 4159), False, 'import sys\n')] |
import sys
sys.path.insert(0,'./../bail-lib/')
import price
print(price.bithumb.BTC)
print(price.coinone.XRP)
print(price.korbit.ETH)
| [
"sys.path.insert"
] | [((11, 47), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./../bail-lib/"""'], {}), "(0, './../bail-lib/')\n", (26, 47), False, 'import sys\n')] |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
'''
Author: <NAME> (<EMAIL>)
Created Date: 2019-10-07 8:47:35
-----
Last Modified: 2019-10-07 8:50:30
Modified By: <NAME> (<EMAIL>)
-----
THIS PROGRAM IS FREE SOFTWARE, IS LICENSED UNDER MIT.
A short and simple permissive license with conditions
only requiring preservation of copyright and license notices.
Copyright © 2019 <NAME>
-----
HISTORY:
Date By Comments
---------- -------- ---------------------------------------------------------
'''
import pytest
class DB:
def __init__(self):
self.intransaction = []
def begin(self, name):
self.intransaction.append(name)
def rollback(self):
self.intransaction.pop()
@pytest.fixture(scope="module")
def db():
return DB()
class TestClass:
@pytest.fixture(autouse=True)
def transact(self, request, db):
db.begin(request.function.__name__)
yield
db.rollback()
def test_method1(self, db):
assert db.intransaction == ["test_method1"]
def test_method2(self, db):
assert db.intransaction == ["test_method2"]
| [
"pytest.fixture"
] | [((722, 752), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (736, 752), False, 'import pytest\n'), ((803, 831), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (817, 831), False, 'import pytest\n')] |
from twisted.internet import process, protocol, defer
from twisted.python import failure
class PwgenException(Exception):
pass
class ReadPassword(protocol.ProcessProtocol):
def __init__(self, deferred, count=1):
self.deferred=deferred
self.count=count
self.stdout=''
self.stderr=''
def outReceived(self, data):
self.stdout=self.stdout+data
def errReceived(self, data):
self.stderr=self.stderr+data
def processEnded(self, reason):
if self.stderr:
self.deferred.errback(failure.Failure(
PwgenException(reason, self.stderr)))
elif self.stdout:
lines=[x for x in self.stdout.split('\n') if x]
            if len(lines)!=self.count:
                self.deferred.errback(failure.Failure(
                    PwgenException(reason, 'Wrong number of lines received.')))
                return
            self.deferred.callback(lines)
else:
self.deferred.errback(failure.Failure(PwgenException(reason, '')))
def generate(reactor, n=1):
assert n>0
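    # Spawn 'pwgen -cn1 -N <n>' and fire the deferred with the generated passwords
    # (or a PwgenException) once the process ends.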
d=defer.Deferred()
proto=ReadPassword(d, n)
process.Process(reactor, 'pwgen', ('pwgen', '-cn1', '-N', '%d'%n), {}, None, proto)
return d
if __name__=='__main__':
from twisted.internet import reactor
import sys
def say(passwords):
for p in passwords:
sys.stdout.write('%s\n' % p)
return passwords
def err(fail):
fail.trap(PwgenException)
sys.stderr.write('pwgen: %s\n' % fail.getErrorMessage())
# Could get more passwords in one fork, but this stresses it more
# on purpose.
l=[]
for i in range(5):
d=generate(reactor, 5)
d.addCallbacks(say, err)
l.append(d)
dl=defer.DeferredList(l)
dl.addBoth(lambda dummy: reactor.stop())
reactor.run()
| [
"twisted.internet.process.Process",
"twisted.internet.reactor.stop",
"twisted.internet.defer.DeferredList",
"twisted.internet.reactor.run",
"twisted.internet.defer.Deferred",
"sys.stdout.write"
] | [((1077, 1093), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (1091, 1093), False, 'from twisted.internet import process, protocol, defer\n'), ((1127, 1216), 'twisted.internet.process.Process', 'process.Process', (['reactor', '"""pwgen"""', "('pwgen', '-cn1', '-N', '%d' % n)", '{}', 'None', 'proto'], {}), "(reactor, 'pwgen', ('pwgen', '-cn1', '-N', '%d' % n), {},\n None, proto)\n", (1142, 1216), False, 'from twisted.internet import process, protocol, defer\n'), ((1756, 1777), 'twisted.internet.defer.DeferredList', 'defer.DeferredList', (['l'], {}), '(l)\n', (1774, 1777), False, 'from twisted.internet import process, protocol, defer\n'), ((1827, 1840), 'twisted.internet.reactor.run', 'reactor.run', ([], {}), '()\n', (1838, 1840), False, 'from twisted.internet import reactor\n'), ((1370, 1398), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\n' % p)"], {}), "('%s\\n' % p)\n", (1386, 1398), False, 'import sys\n'), ((1807, 1821), 'twisted.internet.reactor.stop', 'reactor.stop', ([], {}), '()\n', (1819, 1821), False, 'from twisted.internet import reactor\n')] |
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import light, output
from esphome.const import (
CONF_BLUE,
CONF_COLOR_INTERLOCK,
CONF_COLOR_TEMPERATURE,
CONF_GREEN,
CONF_RED,
CONF_OUTPUT_ID,
CONF_COLD_WHITE_COLOR_TEMPERATURE,
CONF_WARM_WHITE_COLOR_TEMPERATURE,
)
CODEOWNERS = ["@jesserockz"]
rgbct_ns = cg.esphome_ns.namespace("rgbct")
RGBCTLightOutput = rgbct_ns.class_("RGBCTLightOutput", light.LightOutput)
CONF_WHITE_BRIGHTNESS = "white_brightness"
CONFIG_SCHEMA = cv.All(
light.RGB_LIGHT_SCHEMA.extend(
{
cv.GenerateID(CONF_OUTPUT_ID): cv.declare_id(RGBCTLightOutput),
cv.Required(CONF_RED): cv.use_id(output.FloatOutput),
cv.Required(CONF_GREEN): cv.use_id(output.FloatOutput),
cv.Required(CONF_BLUE): cv.use_id(output.FloatOutput),
cv.Required(CONF_COLOR_TEMPERATURE): cv.use_id(output.FloatOutput),
cv.Required(CONF_WHITE_BRIGHTNESS): cv.use_id(output.FloatOutput),
cv.Required(CONF_COLD_WHITE_COLOR_TEMPERATURE): cv.color_temperature,
cv.Required(CONF_WARM_WHITE_COLOR_TEMPERATURE): cv.color_temperature,
cv.Optional(CONF_COLOR_INTERLOCK, default=False): cv.boolean,
}
),
light.validate_color_temperature_channels,
)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_OUTPUT_ID])
await light.register_light(var, config)
red = await cg.get_variable(config[CONF_RED])
cg.add(var.set_red(red))
green = await cg.get_variable(config[CONF_GREEN])
cg.add(var.set_green(green))
blue = await cg.get_variable(config[CONF_BLUE])
cg.add(var.set_blue(blue))
color_temp = await cg.get_variable(config[CONF_COLOR_TEMPERATURE])
cg.add(var.set_color_temperature(color_temp))
white_brightness = await cg.get_variable(config[CONF_WHITE_BRIGHTNESS])
cg.add(var.set_white_brightness(white_brightness))
cg.add(var.set_cold_white_temperature(config[CONF_COLD_WHITE_COLOR_TEMPERATURE]))
cg.add(var.set_warm_white_temperature(config[CONF_WARM_WHITE_COLOR_TEMPERATURE]))
cg.add(var.set_color_interlock(config[CONF_COLOR_INTERLOCK]))
| [
"esphome.codegen.new_Pvariable",
"esphome.codegen.get_variable",
"esphome.config_validation.Optional",
"esphome.components.light.register_light",
"esphome.config_validation.Required",
"esphome.config_validation.declare_id",
"esphome.codegen.esphome_ns.namespace",
"esphome.config_validation.GenerateID",
"esphome.config_validation.use_id"
] | [((382, 414), 'esphome.codegen.esphome_ns.namespace', 'cg.esphome_ns.namespace', (['"""rgbct"""'], {}), "('rgbct')\n", (405, 414), True, 'import esphome.codegen as cg\n'), ((1382, 1422), 'esphome.codegen.new_Pvariable', 'cg.new_Pvariable', (['config[CONF_OUTPUT_ID]'], {}), '(config[CONF_OUTPUT_ID])\n', (1398, 1422), True, 'import esphome.codegen as cg\n'), ((1433, 1466), 'esphome.components.light.register_light', 'light.register_light', (['var', 'config'], {}), '(var, config)\n', (1453, 1466), False, 'from esphome.components import light, output\n'), ((1484, 1517), 'esphome.codegen.get_variable', 'cg.get_variable', (['config[CONF_RED]'], {}), '(config[CONF_RED])\n', (1499, 1517), True, 'import esphome.codegen as cg\n'), ((1565, 1600), 'esphome.codegen.get_variable', 'cg.get_variable', (['config[CONF_GREEN]'], {}), '(config[CONF_GREEN])\n', (1580, 1600), True, 'import esphome.codegen as cg\n'), ((1651, 1685), 'esphome.codegen.get_variable', 'cg.get_variable', (['config[CONF_BLUE]'], {}), '(config[CONF_BLUE])\n', (1666, 1685), True, 'import esphome.codegen as cg\n'), ((1741, 1788), 'esphome.codegen.get_variable', 'cg.get_variable', (['config[CONF_COLOR_TEMPERATURE]'], {}), '(config[CONF_COLOR_TEMPERATURE])\n', (1756, 1788), True, 'import esphome.codegen as cg\n'), ((1868, 1914), 'esphome.codegen.get_variable', 'cg.get_variable', (['config[CONF_WHITE_BRIGHTNESS]'], {}), '(config[CONF_WHITE_BRIGHTNESS])\n', (1883, 1914), True, 'import esphome.codegen as cg\n'), ((615, 644), 'esphome.config_validation.GenerateID', 'cv.GenerateID', (['CONF_OUTPUT_ID'], {}), '(CONF_OUTPUT_ID)\n', (628, 644), True, 'import esphome.config_validation as cv\n'), ((691, 712), 'esphome.config_validation.Required', 'cv.Required', (['CONF_RED'], {}), '(CONF_RED)\n', (702, 712), True, 'import esphome.config_validation as cv\n'), ((757, 780), 'esphome.config_validation.Required', 'cv.Required', (['CONF_GREEN'], {}), '(CONF_GREEN)\n', (768, 780), True, 'import esphome.config_validation as cv\n'), ((825, 847), 'esphome.config_validation.Required', 'cv.Required', (['CONF_BLUE'], {}), '(CONF_BLUE)\n', (836, 847), True, 'import esphome.config_validation as cv\n'), ((892, 927), 'esphome.config_validation.Required', 'cv.Required', (['CONF_COLOR_TEMPERATURE'], {}), '(CONF_COLOR_TEMPERATURE)\n', (903, 927), True, 'import esphome.config_validation as cv\n'), ((972, 1006), 'esphome.config_validation.Required', 'cv.Required', (['CONF_WHITE_BRIGHTNESS'], {}), '(CONF_WHITE_BRIGHTNESS)\n', (983, 1006), True, 'import esphome.config_validation as cv\n'), ((1051, 1097), 'esphome.config_validation.Required', 'cv.Required', (['CONF_COLD_WHITE_COLOR_TEMPERATURE'], {}), '(CONF_COLD_WHITE_COLOR_TEMPERATURE)\n', (1062, 1097), True, 'import esphome.config_validation as cv\n'), ((1133, 1179), 'esphome.config_validation.Required', 'cv.Required', (['CONF_WARM_WHITE_COLOR_TEMPERATURE'], {}), '(CONF_WARM_WHITE_COLOR_TEMPERATURE)\n', (1144, 1179), True, 'import esphome.config_validation as cv\n'), ((1215, 1263), 'esphome.config_validation.Optional', 'cv.Optional', (['CONF_COLOR_INTERLOCK'], {'default': '(False)'}), '(CONF_COLOR_INTERLOCK, default=False)\n', (1226, 1263), True, 'import esphome.config_validation as cv\n'), ((646, 677), 'esphome.config_validation.declare_id', 'cv.declare_id', (['RGBCTLightOutput'], {}), '(RGBCTLightOutput)\n', (659, 677), True, 'import esphome.config_validation as cv\n'), ((714, 743), 'esphome.config_validation.use_id', 'cv.use_id', (['output.FloatOutput'], {}), '(output.FloatOutput)\n', (723, 743), True, 'import 
esphome.config_validation as cv\n'), ((782, 811), 'esphome.config_validation.use_id', 'cv.use_id', (['output.FloatOutput'], {}), '(output.FloatOutput)\n', (791, 811), True, 'import esphome.config_validation as cv\n'), ((849, 878), 'esphome.config_validation.use_id', 'cv.use_id', (['output.FloatOutput'], {}), '(output.FloatOutput)\n', (858, 878), True, 'import esphome.config_validation as cv\n'), ((929, 958), 'esphome.config_validation.use_id', 'cv.use_id', (['output.FloatOutput'], {}), '(output.FloatOutput)\n', (938, 958), True, 'import esphome.config_validation as cv\n'), ((1008, 1037), 'esphome.config_validation.use_id', 'cv.use_id', (['output.FloatOutput'], {}), '(output.FloatOutput)\n', (1017, 1037), True, 'import esphome.config_validation as cv\n')] |
"""isort:skip_file."""
import argparse
import os
import sys
sys.path.append("../")
import numpy as np
import torch
from beta_rec.core.train_engine import TrainEngine
from beta_rec.models.sgl import SGLEngine
from beta_rec.utils.monitor import Monitor
def parse_args():
"""Parse args from command line.
Returns:
args object.
"""
parser = argparse.ArgumentParser(description="Run SGL..")
parser.add_argument(
"--config_file",
nargs="?",
type=str,
default="../configs/sgl_default.json",
help="Specify the config file name. Only accept a file from ../configs/",
)
    # If the following settings are specified on the command line,
    # they will be used to update the parameters received from the config file.
parser.add_argument(
"--emb_dim", nargs="?", type=int, help="Dimension of the embedding."
)
parser.add_argument(
"--tune",
nargs="?",
type=str,
default=False,
help="Tun parameter",
)
parser.add_argument("--lr", nargs="?", type=float, help="Initialize learning rate.")
parser.add_argument("--max_epoch", nargs="?", type=int, help="Number of max epoch.")
parser.add_argument(
"--batch_size", nargs="?", type=int, help="Batch size for training."
)
return parser.parse_args()
def _convert_sp_mat_to_sp_tensor(X):
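    # Convert a scipy sparse matrix into a torch sparse COO tensor.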
coo = X.tocoo().astype(np.float32)
indices = np.mat([coo.row, coo.col])
return torch.sparse_coo_tensor(torch.tensor(indices), coo.data, coo.shape)
def _convert_csr_to_sparse_tensor_inputs(X):
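    # Return the COO components (indices, values, shape) of a scipy sparse matrix.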
coo = X.tocoo()
indices = np.mat([coo.row, coo.col])
return indices, coo.data, coo.shape
class SGL_train(TrainEngine):
"""An instance class from the TrainEngine base class."""
def __init__(self, config):
"""Initialize SGL_train Class.
Args:
config (dict): All the parameters for the model.
"""
self.config = config
super(SGL_train, self).__init__(config)
self.load_dataset()
self.build_data_loader()
self.engine = SGLEngine(self.config)
def build_data_loader(self):
self.config["model"]["n_users"] = self.data.n_users
self.config["model"]["n_items"] = self.data.n_items
norm_adj = self.data.create_sgl_mat(self.config)
self.config["model"]["norm_adj"] = norm_adj
def train(self):
"""Train the model."""
self.monitor = Monitor(
log_dir=self.config["system"]["run_dir"], delay=1, gpu_id=self.gpu_id
)
self.model_save_dir = os.path.join(
self.config["system"]["model_save_dir"], self.config["model"]["save_name"]
)
self.engine = SGLEngine(self.config)
train_loader = self.data.instance_bpr_loader(
batch_size=self.config["model"]["batch_size"],
device=self.config["model"]["device_str"],
)
self._train(self.engine, train_loader, self.model_save_dir)
self.config["run_time"] = self.monitor.stop()
return self.eval_engine.best_valid_performance
if __name__ == "__main__":
args = parse_args()
train_engine = SGL_train(args)
train_engine.train()
train_engine.test()
| [
"beta_rec.models.sgl.SGLEngine",
"numpy.mat",
"beta_rec.utils.monitor.Monitor",
"argparse.ArgumentParser",
"os.path.join",
"torch.tensor",
"sys.path.append"
] | [((61, 83), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (76, 83), False, 'import sys\n'), ((367, 415), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run SGL.."""'}), "(description='Run SGL..')\n", (390, 415), False, 'import argparse\n'), ((1447, 1473), 'numpy.mat', 'np.mat', (['[coo.row, coo.col]'], {}), '([coo.row, coo.col])\n', (1453, 1473), True, 'import numpy as np\n'), ((1634, 1660), 'numpy.mat', 'np.mat', (['[coo.row, coo.col]'], {}), '([coo.row, coo.col])\n', (1640, 1660), True, 'import numpy as np\n'), ((1509, 1530), 'torch.tensor', 'torch.tensor', (['indices'], {}), '(indices)\n', (1521, 1530), False, 'import torch\n'), ((2114, 2136), 'beta_rec.models.sgl.SGLEngine', 'SGLEngine', (['self.config'], {}), '(self.config)\n', (2123, 2136), False, 'from beta_rec.models.sgl import SGLEngine\n'), ((2476, 2554), 'beta_rec.utils.monitor.Monitor', 'Monitor', ([], {'log_dir': "self.config['system']['run_dir']", 'delay': '(1)', 'gpu_id': 'self.gpu_id'}), "(log_dir=self.config['system']['run_dir'], delay=1, gpu_id=self.gpu_id)\n", (2483, 2554), False, 'from beta_rec.utils.monitor import Monitor\n'), ((2607, 2700), 'os.path.join', 'os.path.join', (["self.config['system']['model_save_dir']", "self.config['model']['save_name']"], {}), "(self.config['system']['model_save_dir'], self.config['model'][\n 'save_name'])\n", (2619, 2700), False, 'import os\n'), ((2740, 2762), 'beta_rec.models.sgl.SGLEngine', 'SGLEngine', (['self.config'], {}), '(self.config)\n', (2749, 2762), False, 'from beta_rec.models.sgl import SGLEngine\n')] |
import numpy as np
class BaseTransform(object):
"""
    Base class for transforms: clipping, normalization, centralization, standardization, etc.
    Note that all inherited classes should support only scalar values or 1-dim vectors,
    because higher dimensionality carries a much greater risk of introducing bugs.
    It is recommended to convert all numpy-processed data to np.float32, because it
    is more compatible with PyTorch. Numpy's default float64 can often lead to
    numerical issues or raised exceptions in PyTorch. Similarly, use np.int32 for
    integer data.
    """
def __call__(self, x):
"""
Process the input data
Args:
x (scalar/list/ndarray): input data
Returns:
out: The processed data
"""
raise NotImplementedError
def make_input(self, x):
"""
        Convert the input to a scalar or a 1-dim ndarray:
        1. scalar: kept unchanged
        2. list: converted to a 1-dim ndarray with shape [D]
Args:
x (scalar/list/ndarray): input data
Returns:
x (ndarray): converted data
"""
# Enforce tuple becomes list
if isinstance(x, tuple):
x = list(x)
if np.isscalar(x) or isinstance(x, (list, np.ndarray)): # scalar, list or ndarray
x = np.array(x)
# Convert to type of int32 or float32 with compatibility to PyTorch
            if x.dtype == np.int_:
                x = x.astype(np.int32)
            elif x.dtype == np.float64:
                x = x.astype(np.float32)
if x.ndim <= 1:
return x
else:
raise ValueError('Only scalar or 1-dim vector are supported. ')
else:
raise TypeError('Only following types are supported: scalar, list, ndarray. ') | [
"numpy.array",
"numpy.isscalar"
] | [((1299, 1313), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (1310, 1313), True, 'import numpy as np\n'), ((1395, 1406), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1403, 1406), True, 'import numpy as np\n')] |
# pylint: disable=bad-indentation
import logging
import importlib
import traceback
from pathlib import Path
import numpy as np
import SimpleITK as sitk
import vtk, qt, ctk, slicer
import sitkUtils as su
from slicer.ScriptedLoadableModule import (
ScriptedLoadableModule,
ScriptedLoadableModuleWidget,
ScriptedLoadableModuleLogic,
ScriptedLoadableModuleTest,
)
try:
import torchio
except ImportError:
repoDir = Path('~/git/resector').expanduser()
slicer.util.pip_install(f'--editable {repoDir}')
import resector
class Resector(ScriptedLoadableModule):
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "Resector"
self.parent.categories = ["Utilities"]
self.parent.dependencies = []
self.parent.contributors = [
"<NAME> (University College London)",
]
self.parent.helpText = """[This is the help text.]
"""
self.parent.helpText += self.getDefaultModuleDocumentationLink(
docPage='https://github.com/fepegar/resector')
self.parent.acknowledgementText = """
University College London.
"""
class ResectorWidget(ScriptedLoadableModuleWidget):
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
self.logic = ResectorLogic()
self.makeGUI()
self.onVolumeSelectorModified()
slicer.torchio = self
import SampleData
SampleData.downloadSample('MRHead')
def makeGUI(self):
self.addNodesButton()
self.addApplyButton()
# Add vertical spacer
self.layout.addStretch(1)
def addNodesButton(self):
self.nodesButton = ctk.ctkCollapsibleButton()
self.nodesButton.text = 'Volumes'
self.layout.addWidget(self.nodesButton)
nodesLayout = qt.QFormLayout(self.nodesButton)
self.inputSelector = slicer.qMRMLNodeComboBox()
self.inputSelector.nodeTypes = ['vtkMRMLScalarVolumeNode']
self.inputSelector.addEnabled = False
self.inputSelector.removeEnabled = True
self.inputSelector.noneEnabled = False
self.inputSelector.setMRMLScene(slicer.mrmlScene)
self.inputSelector.currentNodeChanged.connect(self.onVolumeSelectorModified)
nodesLayout.addRow('Input volume: ', self.inputSelector)
self.inputLabelSelector = slicer.qMRMLNodeComboBox()
self.inputLabelSelector.nodeTypes = ['vtkMRMLLabelMapVolumeNode', 'vtkMRMLSegmentationNode']
self.inputLabelSelector.addEnabled = False
self.inputLabelSelector.removeEnabled = True
self.inputLabelSelector.noneEnabled = False
self.inputLabelSelector.setMRMLScene(slicer.mrmlScene)
self.inputLabelSelector.currentNodeChanged.connect(self.onVolumeSelectorModified)
nodesLayout.addRow('Parcellation: ', self.inputLabelSelector)
self.outputSelector = slicer.qMRMLNodeComboBox()
self.outputSelector.nodeTypes = ['vtkMRMLScalarVolumeNode']
self.outputSelector.addEnabled = False
self.outputSelector.removeEnabled = True
self.outputSelector.noneEnabled = True
self.outputSelector.setMRMLScene(slicer.mrmlScene)
self.outputSelector.noneDisplay = 'Create new volume'
self.outputSelector.currentNodeChanged.connect(self.onVolumeSelectorModified)
nodesLayout.addRow('Output volume: ', self.outputSelector)
def addApplyButton(self):
self.applyButton = qt.QPushButton('Apply')
self.layout.addWidget(self.applyButton)
def onVolumeSelectorModified(self):
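    # Enable the Apply button only when both an input volume and a transform are selected.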
self.applyButton.setDisabled(
self.inputSelector.currentNode() is None
or self.currentTransform is None
)
def onApplyButton(self):
inputVolumeNode = self.inputSelector.currentNode()
inputLabelNode = self.inputLabelSelector.currentNode()
outputVolumeNode = self.outputSelector.currentNode()
if outputVolumeNode is None:
name = f'{inputVolumeNode.GetName()} {self.currentTransform.name}'
outputVolumeNode = slicer.mrmlScene.AddNewNodeByClass(
'vtkMRMLScalarVolumeNode',
name,
)
self.outputSelector.currentNodeID = outputVolumeNode.GetID()
try:
kwargs = self.currentTransform.getKwargs()
logging.info(f'Transform args: {kwargs}')
outputImage = self.currentTransform(inputVolumeNode)
except Exception as e:
tb = traceback.format_exc()
message = (
f'Resector returned the error: {tb}'
f'\n\nTransform kwargs:\n{kwargs}'
)
slicer.util.errorDisplay(message)
return
su.PushVolumeToSlicer(outputImage, targetNode=outputVolumeNode)
slicer.util.setSliceViewerLayers(background=outputVolumeNode)
class ResectorLogic(ScriptedLoadableModuleLogic):
pass
class ResectorTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear(0)
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.test_Resector1()
def test_Resector1(self):
""" Ideally you should have several levels of tests. At the lowest level
tests should exercise the functionality of the logic with different inputs
(both valid and invalid). At higher levels your tests should emulate the
way the user would interact with your code and confirm that it still works
the way you intended.
One of the most important features of the tests is that it should alert other
developers when their changes will have an impact on the behavior of your
module. For example, if a developer removes a feature that you depend on,
your test should break so they know that the feature is needed.
"""
self.delayDisplay("Starting the test")
#
# first, get some data
#
import SampleData
SampleData.downloadFromURL(
nodeNames='FA',
fileNames='FA.nrrd',
uris='http://slicer.kitware.com/midas3/download?items=5767',
checksums='SHA256:12d17fba4f2e1f1a843f0757366f28c3f3e1a8bb38836f0de2a32bb1cd476560')
self.delayDisplay('Finished with download and loading')
volumeNode = slicer.util.getNode(pattern="FA")
self.delayDisplay('Test passed!')
| [
"slicer.ScriptedLoadableModule.ScriptedLoadableModule.__init__",
"traceback.format_exc",
"sitkUtils.PushVolumeToSlicer",
"slicer.mrmlScene.Clear",
"qt.QPushButton",
"slicer.mrmlScene.AddNewNodeByClass",
"pathlib.Path",
"slicer.util.errorDisplay",
"slicer.ScriptedLoadableModule.ScriptedLoadableModuleWidget.setup",
"ctk.ctkCollapsibleButton",
"logging.info",
"slicer.util.pip_install",
"slicer.util.getNode",
"qt.QFormLayout",
"slicer.util.setSliceViewerLayers",
"SampleData.downloadSample",
"SampleData.downloadFromURL",
"slicer.qMRMLNodeComboBox"
] | [((464, 512), 'slicer.util.pip_install', 'slicer.util.pip_install', (['f"""--editable {repoDir}"""'], {}), "(f'--editable {repoDir}')\n", (487, 512), False, 'import vtk, qt, ctk, slicer\n'), ((608, 653), 'slicer.ScriptedLoadableModule.ScriptedLoadableModule.__init__', 'ScriptedLoadableModule.__init__', (['self', 'parent'], {}), '(self, parent)\n', (639, 653), False, 'from slicer.ScriptedLoadableModule import ScriptedLoadableModule, ScriptedLoadableModuleWidget, ScriptedLoadableModuleLogic, ScriptedLoadableModuleTest\n'), ((1192, 1232), 'slicer.ScriptedLoadableModule.ScriptedLoadableModuleWidget.setup', 'ScriptedLoadableModuleWidget.setup', (['self'], {}), '(self)\n', (1226, 1232), False, 'from slicer.ScriptedLoadableModule import ScriptedLoadableModule, ScriptedLoadableModuleWidget, ScriptedLoadableModuleLogic, ScriptedLoadableModuleTest\n'), ((1373, 1408), 'SampleData.downloadSample', 'SampleData.downloadSample', (['"""MRHead"""'], {}), "('MRHead')\n", (1398, 1408), False, 'import SampleData\n'), ((1591, 1617), 'ctk.ctkCollapsibleButton', 'ctk.ctkCollapsibleButton', ([], {}), '()\n', (1615, 1617), False, 'import vtk, qt, ctk, slicer\n'), ((1718, 1750), 'qt.QFormLayout', 'qt.QFormLayout', (['self.nodesButton'], {}), '(self.nodesButton)\n', (1732, 1750), False, 'import vtk, qt, ctk, slicer\n'), ((1777, 1803), 'slicer.qMRMLNodeComboBox', 'slicer.qMRMLNodeComboBox', ([], {}), '()\n', (1801, 1803), False, 'import vtk, qt, ctk, slicer\n'), ((2223, 2249), 'slicer.qMRMLNodeComboBox', 'slicer.qMRMLNodeComboBox', ([], {}), '()\n', (2247, 2249), False, 'import vtk, qt, ctk, slicer\n'), ((2729, 2755), 'slicer.qMRMLNodeComboBox', 'slicer.qMRMLNodeComboBox', ([], {}), '()\n', (2753, 2755), False, 'import vtk, qt, ctk, slicer\n'), ((3261, 3284), 'qt.QPushButton', 'qt.QPushButton', (['"""Apply"""'], {}), "('Apply')\n", (3275, 3284), False, 'import vtk, qt, ctk, slicer\n'), ((4382, 4445), 'sitkUtils.PushVolumeToSlicer', 'su.PushVolumeToSlicer', (['outputImage'], {'targetNode': 'outputVolumeNode'}), '(outputImage, targetNode=outputVolumeNode)\n', (4403, 4445), True, 'import sitkUtils as su\n'), ((4450, 4511), 'slicer.util.setSliceViewerLayers', 'slicer.util.setSliceViewerLayers', ([], {'background': 'outputVolumeNode'}), '(background=outputVolumeNode)\n', (4482, 4511), False, 'import vtk, qt, ctk, slicer\n'), ((4958, 4983), 'slicer.mrmlScene.Clear', 'slicer.mrmlScene.Clear', (['(0)'], {}), '(0)\n', (4980, 4983), False, 'import vtk, qt, ctk, slicer\n'), ((5901, 6120), 'SampleData.downloadFromURL', 'SampleData.downloadFromURL', ([], {'nodeNames': '"""FA"""', 'fileNames': '"""FA.nrrd"""', 'uris': '"""http://slicer.kitware.com/midas3/download?items=5767"""', 'checksums': '"""SHA256:12d17fba4f2e1f1a843f0757366f28c3f3e1a8bb38836f0de2a32bb1cd476560"""'}), "(nodeNames='FA', fileNames='FA.nrrd', uris=\n 'http://slicer.kitware.com/midas3/download?items=5767', checksums=\n 'SHA256:12d17fba4f2e1f1a843f0757366f28c3f3e1a8bb38836f0de2a32bb1cd476560')\n", (5927, 6120), False, 'import SampleData\n'), ((6213, 6246), 'slicer.util.getNode', 'slicer.util.getNode', ([], {'pattern': '"""FA"""'}), "(pattern='FA')\n", (6232, 6246), False, 'import vtk, qt, ctk, slicer\n'), ((3825, 3892), 'slicer.mrmlScene.AddNewNodeByClass', 'slicer.mrmlScene.AddNewNodeByClass', (['"""vtkMRMLScalarVolumeNode"""', 'name'], {}), "('vtkMRMLScalarVolumeNode', name)\n", (3859, 3892), False, 'import vtk, qt, ctk, slicer\n'), ((4049, 4090), 'logging.info', 'logging.info', (['f"""Transform args: {kwargs}"""'], {}), "(f'Transform args: {kwargs}')\n", 
(4061, 4090), False, 'import logging\n'), ((426, 448), 'pathlib.Path', 'Path', (['"""~/git/resector"""'], {}), "('~/git/resector')\n", (430, 448), False, 'from pathlib import Path\n'), ((4188, 4210), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4208, 4210), False, 'import traceback\n'), ((4331, 4364), 'slicer.util.errorDisplay', 'slicer.util.errorDisplay', (['message'], {}), '(message)\n', (4355, 4364), False, 'import vtk, qt, ctk, slicer\n')] |
import sys
import pathlib
import numpy as np
from collections import namedtuple
from scipy import interpolate
import math
from .. import API as ls # Careful with this circular import
#%%═════════════════════════════════════════════════════════════════════
# SETUP
path_data = pathlib.Path(__file__).parent.absolute() / 'data'
Reference = namedtuple('Reference',
['raw','atol','cmethod','ostyle','compressed'],
defaults=[1e-5,'interp10','monolith',None])
#%%═════════════════════════════════════════════════════════════════════
# FUNCTIONS
##%%════════════════════════════════════════════════════════════════════
## Auxiliary functions
##%%═════════════════════════════════════════════════════════════════════
## Reference functions
def f2zero_100(n: int) -> float:
'''returns < 0 for values 0 to 100 and >0 for values > 100'''
if round(n) != n: raise ValueError('Not whole number')
if n < 0: raise ValueError('Input must be >= 0')
return np.sqrt(n) - 10.01, True
#%%═════════════════════════════════════════════════════════════════════
# Reference raw data
def raw_poly0(n = 1e1):
x = np.linspace(0,1,int(n))
return x, np.zeros(len(x))
#───────────────────────────────────────────────────────────────────────
def raw_poly1(n = 1e1):
x = np.linspace(0,1,int(n))
return x, x
#───────────────────────────────────────────────────────────────────────
def raw_poly2(n = 1e2):
x = np.linspace(0,1,int(n))
return x, np.array(x**2)
#───────────────────────────────────────────────────────────────────────
def raw_sine(n = 1e4):
x = np.linspace(0,6,int(n))
return x, np.array(np.sin(x*2*math.pi))
#───────────────────────────────────────────────────────────────────────
def raw_sine_x2(n = 1e4):
x = np.linspace(0,6,int(n))
return x, np.array(np.sin(x*x))
#───────────────────────────────────────────────────────────────────────
def raw_sine_normal(n = 1e4, std=0.1):
rng = np.random.default_rng(12345)
x = np.linspace(0,1,int(n))
return x, np.array(np.sin(x*2*math.pi)) + std*rng.standard_normal(int(n))
#───────────────────────────────────────────────────────────────────────
raw = {'poly0': raw_poly0,
'poly1': raw_poly1,
'poly2': raw_poly2,
'sine': raw_sine}
###═════════════════════════════════════════════════════════════════════
class Data():
'''Data container'''
def __init__(self, function, ytol=1e-2):
self.x, self.y = raw[function]()
self.y_range = np.max(self.y) - np.min(self.y)
self.ytol = ytol
self.xc = None
self.yc = None
#───────────────────────────────────────────────────────────────────
def make_lerp(self):
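        # Build a linear interpolant through the compressed points and compute residual statistics against the raw data.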
self.lerp = interpolate.interp1d(self.xc.flatten(), self.yc.flatten(),
assume_sorted=True)
self.residuals = self.lerp(self.x) - self.y
self.residuals_relative = self.residuals / self.ytol
self.residuals_relative_cumulative = np.cumsum(self.residuals_relative)
self.NRMSE = np.std(self.residuals)/self.y_range
self.covariance = np.cov((self.lerp(self.x), self.y))
#───────────────────────────────────────────────────────────────────────
references = [Reference(raw['poly0'],1e-5,'interp10','monolith')]
#───────────────────────────────────────────────────────────────────────
def generate(function, method, ytol=5e-2):
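    # Generate reference data, compress it, evaluate the fit and save the compressed points to CSV.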
data = Data(function, ytol=ytol)
data.xc, data.yc, _ = ls.compress(data.x, data.y, method=method, ytol=data.ytol)
data.make_lerp()
print(np.amax(np.abs(data.residuals_relative)))
np.savetxt(path_data / (function+'_'+method+'.csv'),
np.concatenate((data.xc, data.yc), axis=1), delimiter=',', header='hmm')
return data
| [
"numpy.abs",
"collections.namedtuple",
"numpy.sqrt",
"numpy.random.default_rng",
"pathlib.Path",
"numpy.std",
"numpy.max",
"numpy.array",
"numpy.concatenate",
"numpy.min",
"numpy.sin",
"numpy.cumsum"
] | [((341, 468), 'collections.namedtuple', 'namedtuple', (['"""Reference"""', "['raw', 'atol', 'cmethod', 'ostyle', 'compressed']"], {'defaults': "[1e-05, 'interp10', 'monolith', None]"}), "('Reference', ['raw', 'atol', 'cmethod', 'ostyle', 'compressed'],\n defaults=[1e-05, 'interp10', 'monolith', None])\n", (351, 468), False, 'from collections import namedtuple\n'), ((1978, 2006), 'numpy.random.default_rng', 'np.random.default_rng', (['(12345)'], {}), '(12345)\n', (1999, 2006), True, 'import numpy as np\n'), ((1502, 1518), 'numpy.array', 'np.array', (['(x ** 2)'], {}), '(x ** 2)\n', (1510, 1518), True, 'import numpy as np\n'), ((3028, 3062), 'numpy.cumsum', 'np.cumsum', (['self.residuals_relative'], {}), '(self.residuals_relative)\n', (3037, 3062), True, 'import numpy as np\n'), ((3704, 3746), 'numpy.concatenate', 'np.concatenate', (['(data.xc, data.yc)'], {'axis': '(1)'}), '((data.xc, data.yc), axis=1)\n', (3718, 3746), True, 'import numpy as np\n'), ((1008, 1018), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (1015, 1018), True, 'import numpy as np\n'), ((1668, 1691), 'numpy.sin', 'np.sin', (['(x * 2 * math.pi)'], {}), '(x * 2 * math.pi)\n', (1674, 1691), True, 'import numpy as np\n'), ((1843, 1856), 'numpy.sin', 'np.sin', (['(x * x)'], {}), '(x * x)\n', (1849, 1856), True, 'import numpy as np\n'), ((2526, 2540), 'numpy.max', 'np.max', (['self.y'], {}), '(self.y)\n', (2532, 2540), True, 'import numpy as np\n'), ((2543, 2557), 'numpy.min', 'np.min', (['self.y'], {}), '(self.y)\n', (2549, 2557), True, 'import numpy as np\n'), ((3084, 3106), 'numpy.std', 'np.std', (['self.residuals'], {}), '(self.residuals)\n', (3090, 3106), True, 'import numpy as np\n'), ((3598, 3629), 'numpy.abs', 'np.abs', (['data.residuals_relative'], {}), '(data.residuals_relative)\n', (3604, 3629), True, 'import numpy as np\n'), ((278, 300), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (290, 300), False, 'import pathlib\n'), ((2062, 2085), 'numpy.sin', 'np.sin', (['(x * 2 * math.pi)'], {}), '(x * 2 * math.pi)\n', (2068, 2085), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Keep only the points whose coordinates fall inside the configured latitude/longitude bounding box
"""
import csv
import sys
import os
import argparse
from decimal import *
DELIMITER = os.getenv('DELIMITER', ' ')
NORTH = Decimal('89.930459')
WEST = Decimal('19.1')
EAST = Decimal('179.9')
SOUTH = Decimal('0.1')
def in_switzerland(coords):
lat, lng = coords
return lat < NORTH and lat > SOUTH and lng > WEST and lng < EAST
if __name__ == '__main__':
reader = csv.reader(sys.stdin, delimiter=DELIMITER)
writer = csv.writer(sys.stdout, delimiter=DELIMITER)
for row in reader:
z = int(row[0])
x = int(row[1])
y = int(row[2])
wkt = str(row[3])
v = int(row[4])
lat = Decimal(row[5])
lng = Decimal(row[6])
if in_switzerland((lat, lng)):
writer.writerow([z, x, y, wkt, v, lat, lng])
| [
"csv.writer",
"csv.reader",
"os.getenv"
] | [((151, 178), 'os.getenv', 'os.getenv', (['"""DELIMITER"""', '""" """'], {}), "('DELIMITER', ' ')\n", (160, 178), False, 'import os\n'), ((441, 483), 'csv.reader', 'csv.reader', (['sys.stdin'], {'delimiter': 'DELIMITER'}), '(sys.stdin, delimiter=DELIMITER)\n', (451, 483), False, 'import csv\n'), ((497, 540), 'csv.writer', 'csv.writer', (['sys.stdout'], {'delimiter': 'DELIMITER'}), '(sys.stdout, delimiter=DELIMITER)\n', (507, 540), False, 'import csv\n')] |
from gtfparse import read_gtf
import dinopy as dp
from dinopy.conversion import string_to_bytes
from dinopy.processors import reverse_complement
from pyfaidx import Fasta
def gtf2fasta(genome_fa_path, gtf_path):
"""
    Read an exonic GTF and a reference genome FASTA, and obtain transcript IDs and sequences.
"""
# read
fa = Fasta(genome_fa_path, read_ahead=1000)
df = read_gtf(gtf_path)
# filter: exon
df_exon = df[df["feature"] == "exon"]
# get exon seq (time bottleneck) # TODO get seq only for redundant exon
seq = [string_to_bytes(fa.get_seq(c, s, e).seq) for c, s, e in zip(df_exon['seqname'].values.tolist(), df_exon['start'].values.tolist(), df_exon['end'].values.tolist())]
# summarise key
key=df_exon['transcript_id'].values.tolist()
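    # Find indices where transcript_id changes; exons between consecutive boundaries belong to the same transcript.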
key_lag = ['']
key_lag.extend(key[:-1])
tx_change = [k != kl for k, kl in zip(key, key_lag)]
tx_change_idx = [i for i, val in enumerate(tx_change) if val]
tx_change_idx.append(len(key) + 1)
# summarise tx_id
tx_id = [key[i] for i in tx_change_idx[:-1]]
# summarise tx-level seq
tx_seq_fwd = [b''.join(seq[s:e]) for s, e in zip(tx_change_idx[:-1], tx_change_idx[1:])]
# summarise tx-level strand
exon_strand = df_exon['strand'].values.tolist()
tx_strand = [exon_strand[i] for i in tx_change_idx[:-1]]
# reverse complement
tx_seq = [seq if strand == '+' else reverse_complement(seq) for seq, strand in zip(tx_seq_fwd, tx_strand)]
return (tx_id, tx_seq)
def write_fasta(tx_id, tx_seq, fa_out):
"""
write out fasta file.
"""
with open(fa_out, mode='w') as f:
for txid, txseq in zip(tx_id, tx_seq):
f.write(">" + txid + "\n" + txseq.decode("utf-8") + "\n")
def read_reference_lady(input_path):
"""
    Read reference transcripts in FASTA format using dinopy.
    Return a tuple (reference_names, reference_seqs, reference_lengths, n_ref);
    the three lists share the same order as the entries in the FASTA file.
"""
fp = dp.FastaReader(input_path)
reference_lengths, reference_names, reference_seqs = [], [], []
for sequence, name, length, interval in fp.entries(dtype=bytearray):
name = name.decode("utf-8").replace(" ", "_")
reference_names.append(name)
reference_lengths.append(length)
reference_seqs.append(sequence)
n_ref = len(reference_lengths)
return (reference_names, reference_seqs, reference_lengths, n_ref)
# def fastarecord2bytes(fasta_record):
# return string_to_bytes(fasta_record[:].seq)
# def fasta2ref(tx_id, tx_seq):
# """
# input: transcript id and sequence
# output: same as read_reference() in simlord
# """
# transcripts, weights, reference_names, reference_lengths = [], [], [], []
# max_chrom_length = 0
# for name, seq in zip(tx_id, tx_seq):
# name = name.replace(" ", "_")
# length = len(seq)
# reference_names.append(name)
# reference_lengths.append(length)
# if length > 1:
# transcripts.append((seq, name, length, 0))
# weights.append(length)
# if length > max_chrom_length:
# max_chrom_length = length
# sum_w = sum(weights)
# weights = [x/sum_w for x in weights]
# return (transcripts, reference_names, reference_lengths, max_chrom_length, weights)
| [
"dinopy.processors.reverse_complement",
"gtfparse.read_gtf",
"dinopy.FastaReader",
"pyfaidx.Fasta"
] | [((335, 373), 'pyfaidx.Fasta', 'Fasta', (['genome_fa_path'], {'read_ahead': '(1000)'}), '(genome_fa_path, read_ahead=1000)\n', (340, 373), False, 'from pyfaidx import Fasta\n'), ((383, 401), 'gtfparse.read_gtf', 'read_gtf', (['gtf_path'], {}), '(gtf_path)\n', (391, 401), False, 'from gtfparse import read_gtf\n'), ((2035, 2061), 'dinopy.FastaReader', 'dp.FastaReader', (['input_path'], {}), '(input_path)\n', (2049, 2061), True, 'import dinopy as dp\n'), ((1397, 1420), 'dinopy.processors.reverse_complement', 'reverse_complement', (['seq'], {}), '(seq)\n', (1415, 1420), False, 'from dinopy.processors import reverse_complement\n')] |
import torch
import torch.nn as nn
import torchvision.transforms.functional as TF
# In the original paper the output shape does not match the input shape.
# Here we keep the two shapes the same, which is why the convolutional layers use padding.
class ConvCat(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvCat, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.conv(x)
class Unet(nn.Module):
def __init__(self, in_channels, out_channels):
super(Unet, self).__init__()
self.list = [64, 128, 256, 512]
self.ups = nn.ModuleList()
self.down = nn.ModuleList()
self.pool = nn.MaxPool2d(2, 2)
self.f_out = nn.Conv2d(self.list[0], out_channels, 1)
for feature in self.list:
self.down.append(ConvCat(in_channels, feature))
in_channels = feature
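        # Decoder path: each step upsamples with a transpose conv, then refines with a double conv after the skip concatenation.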
for feature in reversed(self.list):
self.ups.append(nn.ConvTranspose2d(feature*2, feature, 2, 2))
self.ups.append(ConvCat(feature*2, feature))
self.bottom_layer = ConvCat(self.list[-1], self.list[-1]*2)
def forward(self, x):
# for the skip connections
activation = []
'''Conv and max-pool layers'''
for down in self.down:
x = down(x)
activation.append(x)
x = self.pool(x)
x = self.bottom_layer(x)
# reversing the order of activation layers for adding
# these as a skip connection
activation = activation[::-1]
'''Transpose Conv and conv layers'''
for idx in range(0, len(self.ups), 2):
x = self.ups[idx](x)
active = activation[idx//2]
# here we should reshape the activation output (according to the paper)
# but we are reshaping the trans conv layer
if active.shape != x.shape:
x = TF.resize(x, size=active.shape[2:])
# adding the skip connection
x = torch.cat((active, x), dim=1)
# again conv-cat series
x = self.ups[idx+1](x)
# the out layer
x = self.f_out(x)
return x
if __name__ == '__main__':
a = torch.zeros((2, 1, 160, 160)).to('cuda')
model = Unet(1, 4).to(device='cuda')
print(model(a).shape)
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torchvision.transforms.functional.resize",
"torch.nn.ConvTranspose2d",
"torch.zeros",
"torch.cat"
] | [((963, 978), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (976, 978), True, 'import torch.nn as nn\n'), ((1000, 1015), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1013, 1015), True, 'import torch.nn as nn\n'), ((1037, 1055), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (1049, 1055), True, 'import torch.nn as nn\n'), ((1078, 1118), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.list[0]', 'out_channels', '(1)'], {}), '(self.list[0], out_channels, 1)\n', (1087, 1118), True, 'import torch.nn as nn\n'), ((428, 485), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(in_channels, out_channels, 3, 1, 1, bias=False)\n', (437, 485), True, 'import torch.nn as nn\n'), ((500, 528), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (514, 528), True, 'import torch.nn as nn\n'), ((543, 564), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (550, 564), True, 'import torch.nn as nn\n'), ((579, 637), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_channels', 'out_channels', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(out_channels, out_channels, 3, 1, 1, bias=False)\n', (588, 637), True, 'import torch.nn as nn\n'), ((652, 680), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (666, 680), True, 'import torch.nn as nn\n'), ((695, 716), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (702, 716), True, 'import torch.nn as nn\n'), ((2408, 2437), 'torch.cat', 'torch.cat', (['(active, x)'], {'dim': '(1)'}), '((active, x), dim=1)\n', (2417, 2437), False, 'import torch\n'), ((2624, 2653), 'torch.zeros', 'torch.zeros', (['(2, 1, 160, 160)'], {}), '((2, 1, 160, 160))\n', (2635, 2653), False, 'import torch\n'), ((1328, 1374), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(feature * 2)', 'feature', '(2)', '(2)'], {}), '(feature * 2, feature, 2, 2)\n', (1346, 1374), True, 'import torch.nn as nn\n'), ((2311, 2346), 'torchvision.transforms.functional.resize', 'TF.resize', (['x'], {'size': 'active.shape[2:]'}), '(x, size=active.shape[2:])\n', (2320, 2346), True, 'import torchvision.transforms.functional as TF\n')] |
import re, os
from discord.ext import commands
import kurisu.prefs
import datetime, sqlite3
_mentions_transforms = {
'@everyone': '@\u200beveryone',
'@here': '@\u200bhere'
}
_mention_pattern = re.compile('|'.join(_mentions_transforms.keys()))
def cache_size():
try:
stdout = os.popen('du -h /home/pi/MusicBot/audio_cache').readline()
return "%s" % stdout.split()[0].replace(',', '.')
except:
return "недоступен"
class FGL:
"""Просто все подряд, десу"""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
async def help(self, ctx, *cmd: str):
"""Показывает данное сообщение"""
bot = ctx.bot
destination = ctx.message.author if bot.pm_help else ctx.message.channel
def repl(obj):
return _mentions_transforms.get(obj.group(0), '')
# help by itself just lists our own commands.
if len(cmd) == 0:
helpEmbed = bot.formatter.format_help_for(ctx, bot)
elif len(cmd) == 1:
# try to see if it is a cog name
name = _mention_pattern.sub(repl, cmd[0])
command = None
if name in bot.cogs:
command = bot.cogs[name]
else:
command = bot.commands.get(name)
if command is None:
await bot.send_message(destination, "Команда %s не найдена." % name)
return
helpEmbed = bot.formatter.format_help_for(ctx, command)
else:
name = _mention_pattern.sub(repl, cmd[0])
command = bot.commands.get(name)
if command is None:
await bot.send_message(destination, "Команда %s не найдена." % name)
return
for key in cmd[1:]:
try:
key = _mention_pattern.sub(repl, key)
command = command.commands.get(key)
if command is None:
await bot.send_message(destination, "Подкоманда %s не найдена." % key)
return
except AttributeError:
await bot.send_message(destination, "Команда %s не имеет подкоманд." % name)
return
helpEmbed = bot.formatter.format_help_for(ctx, command)
if bot.pm_help is None:
characters = sum(map(lambda l: len(l), pages))
# modify destination based on length of pages.
if characters > 1000:
destination = ctx.message.author
await bot.send_message(destination, embed=helpEmbed)
@commands.command()
async def status(self):
"""Возвращает информацию о хост-машине"""
stats = kurisu.prefs.info()
emb = kurisu.prefs.Embeds.new('normal')
emb.add_field(name = 'Статистика', value='CPU: {d[0]}%\nRAM Total: {d[1]}MB\nRAM Used: {d[2]}MB\nTemp: {d[4]}`C\nUptime: {d[5]}'.format(d=stats))
emb.add_field(name = 'Моэка', value='Кэш: %s' % cache_size())
await self.bot.say(embed = emb)
@commands.command(pass_context=True)
async def info(self, ctx, *users: str):
"""Возвращает информацию о пользователе
Аргументы:
-----------
users: [`discord.Member`]
Массив упоминаний пользователей.
Если нет ни одного упоминания, используется автор сообщения.
"""
if len(ctx.message.mentions) == 0:
users = [ctx.message.author]
else:
users = ctx.message.mentions
for u in users:
emb = kurisu.prefs.Embeds.new('normal')
emb.colour = u.colour
emb.title = '%s%s%s' % (u, "" if (u.name == u.display_name) else (" a.k.a %s" % u.display_name), u.bot and " [BOT]" or '')
emb.add_field(name="ID:", value=u.id, inline=False)
embDays = (datetime.datetime.now() - u.joined_at).days
def isYa(num):
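			# Russian plural helper: true when the count takes the "few" form (ends in 2-4, excluding 12-14).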
return (num%10 > 1) and (num%10 < 5) and ((num//10 == 0) or (num//10 > 1))
def dateParse(days):
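			# Convert a day count into a human-readable "X years, Y months, Z days" string (in Russian).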
res = ''
years, days = days//365, days%365
if years > 0:
if years == 1:
res = "1 год"
elif (years > 1) and (years < 5):
res = "%s года" % years
else:
res = "%s лет" % years
months, days = days//30, days%30
if months > 0:
if months == 1:
res = ', '.join([res, "1 месяц"])
elif isYa(months):
res = ', '.join([res, "%s месяца" % months])
else:
res = ', '.join([res, "%s месяцев" % months])
if days > 0:
if days == 1:
res = ', '.join([res, "1 день"])
elif isYa(days):
res = ', '.join([res, "%s дня" % days])
else:
res = ', '.join([res, "%s дней" % days])
if res.startswith(', '):
res = res[2:]
if res.startswith(', '):
res = res[2:]
if res == '':
res = 'Ни одного дня'
return res
emb.add_field(name="На сервере:", value=dateParse(embDays), inline=False)
r, g, b = u.top_role.colour.to_tuple()
emb.add_field(name="Основная роль:", value='%s (#%02x%02x%02x)' % ((u.top_role.name == "@everyone" and "Без роли" or u.top_role.name), r, g, b), inline=True)
if kurisu.prefs.Roles.alpaca in u.roles:
conn = sqlite3.connect('db.sqlite3')
cursor = conn.cursor()
cursor.execute('select * from alpaca where userID = %s limit 1' % u.id)
a = cursor.fetchall()
t = datetime.datetime.fromtimestamp(a[0][2]) - datetime.timedelta(hours=3)
pt = kurisu.prefs.parse_time(t.timetuple())
pt = '%s %s' % (pt[0], pt[1])
emb.add_field(name="Альпакамен", value="до %s" % pt, inline=True)
roles = [r.name for r in u.roles[::-1][:-1] if r != u.top_role]
if len(roles) > 0:
emb.add_field(name="Остальные роли", value=", ".join(roles), inline=False)
emb.set_thumbnail(url=kurisu.prefs.avatar_url(u))
await self.bot.say(embed=emb)
def setup(bot):
bot.remove_command("help")
bot.add_cog(FGL(bot))
| [
"datetime.datetime.fromtimestamp",
"sqlite3.connect",
"datetime.datetime.now",
"os.popen",
"datetime.timedelta",
"discord.ext.commands.command"
] | [((518, 553), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (534, 553), False, 'from discord.ext import commands\n'), ((2162, 2180), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (2178, 2180), False, 'from discord.ext import commands\n'), ((2574, 2609), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (2590, 2609), False, 'from discord.ext import commands\n'), ((285, 332), 'os.popen', 'os.popen', (['"""du -h /home/pi/MusicBot/audio_cache"""'], {}), "('du -h /home/pi/MusicBot/audio_cache')\n", (293, 332), False, 'import re, os\n'), ((4584, 4613), 'sqlite3.connect', 'sqlite3.connect', (['"""db.sqlite3"""'], {}), "('db.sqlite3')\n", (4599, 4613), False, 'import datetime, sqlite3\n'), ((3247, 3270), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3268, 3270), False, 'import datetime, sqlite3\n'), ((4751, 4791), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['a[0][2]'], {}), '(a[0][2])\n', (4782, 4791), False, 'import datetime, sqlite3\n'), ((4794, 4821), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(3)'}), '(hours=3)\n', (4812, 4821), False, 'import datetime, sqlite3\n')] |
""" Unit tests for quality assessment
"""
import unittest
import logging
from arl.data.data_models import QA
log = logging.getLogger(__name__)
class TestQualityAssessment(unittest.TestCase):
def test_qa(self):
qa = QA(origin='foo', data={'rms': 100.0, 'median': 10.0}, context='test of qa')
log.debug(str(qa))
if __name__ == '__main__':
unittest.main()
| [
"logging.getLogger",
"arl.data.data_models.QA",
"unittest.main"
] | [((119, 146), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (136, 146), False, 'import logging\n'), ((374, 389), 'unittest.main', 'unittest.main', ([], {}), '()\n', (387, 389), False, 'import unittest\n'), ((238, 313), 'arl.data.data_models.QA', 'QA', ([], {'origin': '"""foo"""', 'data': "{'rms': 100.0, 'median': 10.0}", 'context': '"""test of qa"""'}), "(origin='foo', data={'rms': 100.0, 'median': 10.0}, context='test of qa')\n", (240, 313), False, 'from arl.data.data_models import QA\n')] |
from cadCAD_tools.execution import easy_run
from cadCAD_tools.preparation import sweep_cartesian_product
from baseline_model.types import GrowthScenario
import pandas as pd
from baseline_model.params import RAW_PARAMS
def standard_run() -> pd.DataFrame:
from baseline_model import INITIAL_STATE, PARAMS, BLOCKS, TIMESTEPS, SAMPLES
# Simulations
# Set 1 of simulations: alternate growth scenarios
# TODO: make sure that it matches the scoped experiment on alternate growth
# scenarios
set_1_args = (INITIAL_STATE, PARAMS, BLOCKS, TIMESTEPS, SAMPLES)
set_1_df = easy_run(*set_1_args).assign(set='alternate_growth')
# Set 2 of simulations: network power = baseline
set_2_initial_state = INITIAL_STATE
set_2_initial_state['network_power'] = set_2_initial_state['baseline']
set_2_params = RAW_PARAMS.copy()
set_2_params['baseline_activated'] = [True]
set_2_params['network_power_scenario'] = [GrowthScenario('baseline')]
set_2_params = sweep_cartesian_product(set_2_params)
set_2_args = (set_2_initial_state, set_2_params,
BLOCKS, TIMESTEPS, SAMPLES)
set_2_df = easy_run(*set_2_args).assign(set='baseline')
# Post Processing
raw_df = pd.concat([set_1_df, set_2_df])
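    # Flatten the dataclass-valued columns into plain columns, then derive per-day block and marginal rewards.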
dfs = [raw_df,
raw_df.reward.map(lambda x: x.__dict__).apply(pd.Series),
raw_df.network_power_scenario.map(lambda x: x.__dict__).apply(pd.Series)]
DROP_COLS = ['reward', 'network_power_scenario']
df = (pd.concat(dfs,
axis=1)
.drop(columns=DROP_COLS)
# .dropna()
.set_index('days_passed')
.assign(block_reward=lambda x: x.simple_reward + x.baseline_reward)
.assign(marginal_reward=lambda x: x.block_reward / x.network_power)
)
# Mining Utility
# TODO: make sure that it matches the expected dataset for the
# visualizations
agg_df = None
return df # TODO use `agg_df` instead
| [
"baseline_model.types.GrowthScenario",
"cadCAD_tools.preparation.sweep_cartesian_product",
"baseline_model.params.RAW_PARAMS.copy",
"cadCAD_tools.execution.easy_run",
"pandas.concat"
] | [((835, 852), 'baseline_model.params.RAW_PARAMS.copy', 'RAW_PARAMS.copy', ([], {}), '()\n', (850, 852), False, 'from baseline_model.params import RAW_PARAMS\n'), ((994, 1031), 'cadCAD_tools.preparation.sweep_cartesian_product', 'sweep_cartesian_product', (['set_2_params'], {}), '(set_2_params)\n', (1017, 1031), False, 'from cadCAD_tools.preparation import sweep_cartesian_product\n'), ((1229, 1260), 'pandas.concat', 'pd.concat', (['[set_1_df, set_2_df]'], {}), '([set_1_df, set_2_df])\n', (1238, 1260), True, 'import pandas as pd\n'), ((947, 973), 'baseline_model.types.GrowthScenario', 'GrowthScenario', (['"""baseline"""'], {}), "('baseline')\n", (961, 973), False, 'from baseline_model.types import GrowthScenario\n'), ((593, 614), 'cadCAD_tools.execution.easy_run', 'easy_run', (['*set_1_args'], {}), '(*set_1_args)\n', (601, 614), False, 'from cadCAD_tools.execution import easy_run\n'), ((1148, 1169), 'cadCAD_tools.execution.easy_run', 'easy_run', (['*set_2_args'], {}), '(*set_2_args)\n', (1156, 1169), False, 'from cadCAD_tools.execution import easy_run\n'), ((1500, 1522), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(1)'}), '(dfs, axis=1)\n', (1509, 1522), True, 'import pandas as pd\n')] |
#!/usr/bin/env python3
#
# Copyright (c) 2014, <NAME>, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import os
import random
import sys
# notify of progress
print("P25")
sys.stdout.flush()
from rstem import led_matrix
import RPi.GPIO as GPIO
# notify of progress
print("P50")
sys.stdout.flush()
# initialization
#led_matrix.init_grid(angle=270) # make longwise
led_matrix.init_matrices([(0,8),(8,8),(8,0),(0,0)])
GPIO.setmode(GPIO.BCM)
# notify of progress
print("P70")
sys.stdout.flush()
# game variables
score = 0
BLINKING_TIME = 7 # number of cycles to blink full lines
speed = 0.5 # speed of piece falling down (at start)
init_speed = speed
class State(object):
IDLE, RESET, MOVINGDOWN, BLINKING, DONE, EXIT = range(6)
curr_state = State.IDLE
curr_piece = None
next_piece = None
stack = None
sidebar = None
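# countdown of display cycles while completed lines blink before being removed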
blinking_clock = 0
# setup buttons
UP = 25
DOWN = 24
LEFT = 23
RIGHT = 18
SELECT = 22
START = 27
A = 4
B = 17
"""Shape names as described in U{http://en.wikipedia.org/wiki/Tetris}"""
SHAPES = "IJLOSTZ"
shape_sprites = {}
for shape in SHAPES:
# store LEDSprite of tetris piece
sprite = os.path.join(os.path.dirname(os.path.abspath(__file__)), "tetris_sprites", shape + ".spr")
shape_sprites[shape] = led_matrix.LEDSprite(sprite)
# notify of progress
print("P80")
sys.stdout.flush()
def valid_shape(shape):
"""
@returns: True if given shape is a valid tetris shape
"""
return shape in SHAPES and len(shape) == 1
class Sidebar(object):
"""Represents the side bar to the right of the stack. Will show score and next piece"""
def __init__(self, x_pos):
self.x_pos = x_pos
def draw(self):
led_matrix.line((self.x_pos, 0), (self.x_pos, led_matrix.height()-1))
# draw the next piece
next_piece.draw(pos=(self.x_pos + 3, led_matrix.height() - 5))
# draw the score (aligned nicely)
if score < 10:
led_matrix.text(str(score), (self.x_pos + 5, 1))
else:
led_matrix.text(str(score), (self.x_pos + 1, 1))
class Stack(object):
"""Represents the stack of rested tetris pieces"""
def __init__(self, width=None):
self.points = [] # 2D list that represents stacks's color for each point
# if width is none, use the entire screen
if width is None:
self.width = led_matrix.width()
else:
self.width = width
def height(self):
return len(self.points)
def add(self, piece):
"""Adds given piece to the stack
@param piece: piece to add to stack, should be touching current stack
@type piece: L{Piece}
"""
for y_offset, line in enumerate(reversed(piece.sprite.bitmap)): # iterate going up
# check if we need to add a new line to the stack
if (piece.pos[1] + y_offset) > (len(stack.points) - 1):
assert piece.pos[1] + y_offset == len(stack.points)
# add a new line to stack and fill with transparent pixels (this is new top of stack)
self.points.append([16 for i in range(self.width)])
# add line of piece to top of stack
for x_offset, pixel in enumerate(line):
# add piece if not transparent
if pixel != 16:
stack.points[piece.pos[1] + y_offset][piece.pos[0] + x_offset] = pixel
def draw(self, blinking_off=False):
"""Draws stack on led display
@param blinking_off: If set, it will display full lines as a line of all color == 0.
Useful for blinking full lines.
@type blinking_off: boolean
"""
for y, line in enumerate(self.points):
# show a line of color == 0 for full lines if currently blinking off
if blinking_off and all(pixel != 16 for pixel in line): # short-circuit avoids heavy computation if not needed
led_matrix.line((0,y), (self.width-1, y), color=0)
else:
for x, pixel in enumerate(line):
led_matrix.point(x, y, pixel)
def coverage(self):
"""
@returns: A set of the points that make up the stack
"""
ret = set()
for y, line in enumerate(self.points):
for x, pixel in enumerate(line):
if pixel != 16:
ret.add((x, y))
return ret
def remove_full_lines(self):
"""Removes lines that are full from stack
@returns: number of full lines removed
@rtype: int
"""
# remove lines in reverse so we don't mess it up if multiple lines need to be removed
score = 0
for y, line in reversed(list(enumerate(self.points))):
if all(pixel != 16 for pixel in line):
score += 1
del self.points[y]
return score
class Piece(object):
def __init__(self, shape, pos=None):
if not valid_shape(shape):
raise ValueError("Not a valid shape")
if pos is None:
self.pos = (int(stack.width/2 - 1), int(led_matrix.height()))
else:
self.pos = pos
self.sprite = shape_sprites[shape].copy() # get a copy of sprite
def rotate(self, clockwise=True):
"""Rotates the piece clockwise or counter-clockwise"""
# TODO: probably fix this, because I don't think it will rotate like I want it to
if clockwise:
self.sprite.rotate(90)
else:
self.sprite.rotate(270)
# move piece over if goes off display
while self.pos[0] + self.sprite.width - 1 >= stack.width:
self.pos = (self.pos[0] - 1, self.pos[1])
def coverage(self, pos=None):
"""Returns the set of points that the piece is covering.
@param pos: Set if we want to test the coverage as if the piece was at
that location.
@type pos: (x,y)
@returns: A set of points that the piece is covering
@rtype: set of 2 tuples
"""
if pos is None:
pos = self.pos
coverage = set()
for y, line in enumerate(reversed(self.sprite.bitmap)):
for x, pixel in enumerate(line):
if pixel != 16: # ignore transparent pixels in converage
coverage.add((pos[0] + x, pos[1] + y))
return coverage
def can_movedown(self):
"""Tests whether piece can move down without colliding with other piece
or falling off edge (hit bottom)
@param stack: current stack object
@type stack: L{Stack}
@rtype: boolean
"""
# check if it is at bottom of screen
if self.pos[1] <= 0:
return False
# get coverage pretending we moved down
pos = (self.pos[0], self.pos[1] - 1)
self_coverage = self.coverage(pos)
stack_coverage = stack.coverage()
        return (len(self_coverage.intersection(stack_coverage)) == 0)
def movedown(self):
"""Moves piece down one pixel."""
self.pos = (self.pos[0], self.pos[1] - 1)
def moveright(self):
new_pos = (self.pos[0] + 1, self.pos[1])
# if we are not running off the stack width and not running into the stack, change position
if self.pos[0] + self.sprite.width < stack.width \
and len(self.coverage(new_pos).intersection(stack.coverage())) == 0:
self.pos = new_pos
def moveleft(self):
new_pos = (self.pos[0] - 1, self.pos[1])
# if we are not running off the display and not running into the stack, change position
if self.pos[0] - 1 >= 0 \
and len(self.coverage(new_pos).intersection(stack.coverage())) == 0:
self.pos = new_pos
def draw(self, pos=None):
"""Draws piece on led matrix"""
if pos is None:
led_matrix.sprite(self.sprite, self.pos)
else:
led_matrix.sprite(self.sprite, pos)
# what to do when button is pressed
def button_handler(channel):
global curr_state
if channel in [START, SELECT]:
curr_state = State.EXIT
return
if curr_state == State.MOVINGDOWN and curr_piece is not None:
try:
if channel == LEFT:
while GPIO.input(LEFT) == 0:
curr_piece.moveleft()
time.sleep(.2)
elif channel == RIGHT:
while GPIO.input(RIGHT) == 0:
curr_piece.moveright()
time.sleep(.2)
elif channel == A or channel == UP:
curr_piece.rotate(90)
except AttributeError:
# catch AttributeError that can happen if curr_piece is NoneType
pass
elif (curr_state == State.IDLE or curr_state == State.DONE) and channel == A:
curr_state = State.RESET
# set button handler to physical buttons
GPIO.setmode(GPIO.BCM)
for button in [UP, DOWN, LEFT, RIGHT, SELECT, START, A, B]:
GPIO.setup(button, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(button, GPIO.FALLING, callback=button_handler, bouncetime=100)
# notify of progress
print("P90")
sys.stdout.flush()
# create intro title (a vertical display of "TETRIS")
title = led_matrix.LEDText("S").rotate(90)
for character in reversed("TETRI"):
# append a 1 pixel wide (high) spacing
title.append(led_matrix.LEDSprite(width=1, height=title.height))
# append next character
title.append(led_matrix.LEDText(character).rotate(90))
# rotate title up
title.rotate(-90)
# notify menu we are ready for the led matrix
print("READY")
sys.stdout.flush()
while True:
# state when a piece is slowly moving down the display
if curr_state == State.MOVINGDOWN:
# up speed if score is a multiple of 5
if score != 0 and score % 5 == 0:
new_speed = init_speed - (score/5*0.1)
if new_speed > 0:
speed = new_speed
# check if stack hit the top of display
if stack.height() >= led_matrix.height() - 1:
curr_state = State.DONE
continue
# check if piece can't move down, and if so, add piece to stack and start blinking any full lines
if not curr_piece.can_movedown():
stack.add(curr_piece) # add piece to stack
curr_piece = None # piece is no longer curr_piece
blinking_clock = BLINKING_TIME # set up blinking clock
curr_state = State.BLINKING # goto blinking state
continue
# otherwise move piece down
curr_piece.movedown()
# show screen
led_matrix.erase()
curr_piece.draw()
stack.draw()
if sidebar is not None:
sidebar.draw()
led_matrix.show()
# speed up delay if DOWN button is held down
if GPIO.input(DOWN) == 0:
time.sleep(.005)
else:
time.sleep(speed)
# when piece has hit that stack and we determine if a line has been filled
elif curr_state == State.BLINKING:
# when blinking clock counts down to zero, remove full lines and start a new piece
if blinking_clock == 0:
score += stack.remove_full_lines() # add full lines to total score
# make a new next_piece and goto moving the new curr_piece down
curr_piece = next_piece
next_piece = Piece(random.choice(SHAPES))
curr_state = State.MOVINGDOWN
else:
# draw blinking full lines (if any)
led_matrix.erase()
# draw stack with full lines off every other cycle
stack.draw(blinking_off=(blinking_clock % 2))
if sidebar is not None:
sidebar.draw()
led_matrix.show()
blinking_clock -= 1
time.sleep(.1)
elif curr_state == State.IDLE:
# display scrolling virtical text
y_pos = - title.height
while y_pos < led_matrix.height():
# if state changes stop scrolling and go to that state
if curr_state != State.IDLE:
break
# display title in the center of the screen
led_matrix.erase()
led_matrix.sprite(title, (int(led_matrix.width()/2) - int(title.width/2), y_pos))
led_matrix.show()
y_pos += 1
time.sleep(.1)
elif curr_state == State.RESET:
score = 0
stack = None
if led_matrix.width() < 16:
stack = Stack()
else:
            stack = Stack(8)  # if the screen is too wide, only use the left half for the stack
sidebar = None
sidebar = Sidebar(8)
curr_piece = Piece(random.choice(SHAPES))
next_piece = Piece(random.choice(SHAPES))
curr_state = State.MOVINGDOWN
elif curr_state == State.DONE:
led_matrix.erase()
led_matrix.text(str(score))
led_matrix.show()
elif curr_state == State.EXIT:
GPIO.cleanup()
led_matrix.cleanup()
sys.exit(0)
| [
"rstem.led_matrix.init_matrices",
"time.sleep",
"sys.exit",
"RPi.GPIO.setmode",
"RPi.GPIO.cleanup",
"rstem.led_matrix.LEDText",
"rstem.led_matrix.width",
"rstem.led_matrix.show",
"rstem.led_matrix.erase",
"sys.stdout.flush",
"rstem.led_matrix.sprite",
"random.choice",
"RPi.GPIO.setup",
"rstem.led_matrix.height",
"rstem.led_matrix.line",
"rstem.led_matrix.LEDSprite",
"rstem.led_matrix.cleanup",
"RPi.GPIO.add_event_detect",
"RPi.GPIO.input",
"rstem.led_matrix.point",
"os.path.abspath"
] | [((694, 712), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (710, 712), False, 'import sys\n'), ((802, 820), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (818, 820), False, 'import sys\n'), ((889, 947), 'rstem.led_matrix.init_matrices', 'led_matrix.init_matrices', (['[(0, 8), (8, 8), (8, 0), (0, 0)]'], {}), '([(0, 8), (8, 8), (8, 0), (0, 0)])\n', (913, 947), False, 'from rstem import led_matrix\n'), ((942, 964), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (954, 964), True, 'import RPi.GPIO as GPIO\n'), ((1000, 1018), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1016, 1018), False, 'import sys\n'), ((1830, 1848), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1846, 1848), False, 'import sys\n'), ((9619, 9641), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (9631, 9641), True, 'import RPi.GPIO as GPIO\n'), ((9884, 9902), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9900, 9902), False, 'import sys\n'), ((10339, 10357), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10355, 10357), False, 'import sys\n'), ((1766, 1794), 'rstem.led_matrix.LEDSprite', 'led_matrix.LEDSprite', (['sprite'], {}), '(sprite)\n', (1786, 1794), False, 'from rstem import led_matrix\n'), ((9706, 9759), 'RPi.GPIO.setup', 'GPIO.setup', (['button', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_UP'}), '(button, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n', (9716, 9759), True, 'import RPi.GPIO as GPIO\n'), ((9764, 9852), 'RPi.GPIO.add_event_detect', 'GPIO.add_event_detect', (['button', 'GPIO.FALLING'], {'callback': 'button_handler', 'bouncetime': '(100)'}), '(button, GPIO.FALLING, callback=button_handler,\n bouncetime=100)\n', (9785, 9852), True, 'import RPi.GPIO as GPIO\n'), ((9970, 9993), 'rstem.led_matrix.LEDText', 'led_matrix.LEDText', (['"""S"""'], {}), "('S')\n", (9988, 9993), False, 'from rstem import led_matrix\n'), ((10101, 10151), 'rstem.led_matrix.LEDSprite', 'led_matrix.LEDSprite', ([], {'width': '(1)', 'height': 'title.height'}), '(width=1, height=title.height)\n', (10121, 10151), False, 'from rstem import led_matrix\n'), ((11403, 11421), 'rstem.led_matrix.erase', 'led_matrix.erase', ([], {}), '()\n', (11419, 11421), False, 'from rstem import led_matrix\n'), ((11536, 11553), 'rstem.led_matrix.show', 'led_matrix.show', ([], {}), '()\n', (11551, 11553), False, 'from rstem import led_matrix\n'), ((1677, 1702), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1692, 1702), False, 'import os\n'), ((2873, 2891), 'rstem.led_matrix.width', 'led_matrix.width', ([], {}), '()\n', (2889, 2891), False, 'from rstem import led_matrix\n'), ((8581, 8621), 'rstem.led_matrix.sprite', 'led_matrix.sprite', (['self.sprite', 'self.pos'], {}), '(self.sprite, self.pos)\n', (8598, 8621), False, 'from rstem import led_matrix\n'), ((8648, 8683), 'rstem.led_matrix.sprite', 'led_matrix.sprite', (['self.sprite', 'pos'], {}), '(self.sprite, pos)\n', (8665, 8683), False, 'from rstem import led_matrix\n'), ((11627, 11643), 'RPi.GPIO.input', 'GPIO.input', (['DOWN'], {}), '(DOWN)\n', (11637, 11643), True, 'import RPi.GPIO as GPIO\n'), ((11662, 11679), 'time.sleep', 'time.sleep', (['(0.005)'], {}), '(0.005)\n', (11672, 11679), False, 'import time\n'), ((11705, 11722), 'time.sleep', 'time.sleep', (['speed'], {}), '(speed)\n', (11715, 11722), False, 'import time\n'), ((4486, 4539), 'rstem.led_matrix.line', 'led_matrix.line', (['(0, y)', '(self.width - 1, y)'], {'color': '(0)'}), '((0, y), (self.width - 1, y), 
color=0)\n', (4501, 4539), False, 'from rstem import led_matrix\n'), ((10198, 10227), 'rstem.led_matrix.LEDText', 'led_matrix.LEDText', (['character'], {}), '(character)\n', (10216, 10227), False, 'from rstem import led_matrix\n'), ((10763, 10782), 'rstem.led_matrix.height', 'led_matrix.height', ([], {}), '()\n', (10780, 10782), False, 'from rstem import led_matrix\n'), ((12326, 12344), 'rstem.led_matrix.erase', 'led_matrix.erase', ([], {}), '()\n', (12342, 12344), False, 'from rstem import led_matrix\n'), ((12545, 12562), 'rstem.led_matrix.show', 'led_matrix.show', ([], {}), '()\n', (12560, 12562), False, 'from rstem import led_matrix\n'), ((12607, 12622), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (12617, 12622), False, 'import time\n'), ((2252, 2271), 'rstem.led_matrix.height', 'led_matrix.height', ([], {}), '()\n', (2269, 2271), False, 'from rstem import led_matrix\n'), ((4624, 4653), 'rstem.led_matrix.point', 'led_matrix.point', (['x', 'y', 'pixel'], {}), '(x, y, pixel)\n', (4640, 4653), False, 'from rstem import led_matrix\n'), ((5727, 5746), 'rstem.led_matrix.height', 'led_matrix.height', ([], {}), '()\n', (5744, 5746), False, 'from rstem import led_matrix\n'), ((8991, 9007), 'RPi.GPIO.input', 'GPIO.input', (['LEFT'], {}), '(LEFT)\n', (9001, 9007), True, 'import RPi.GPIO as GPIO\n'), ((9076, 9091), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (9086, 9091), False, 'import time\n'), ((12187, 12208), 'random.choice', 'random.choice', (['SHAPES'], {}), '(SHAPES)\n', (12200, 12208), False, 'import random\n'), ((12761, 12780), 'rstem.led_matrix.height', 'led_matrix.height', ([], {}), '()\n', (12778, 12780), False, 'from rstem import led_matrix\n'), ((12980, 12998), 'rstem.led_matrix.erase', 'led_matrix.erase', ([], {}), '()\n', (12996, 12998), False, 'from rstem import led_matrix\n'), ((13105, 13122), 'rstem.led_matrix.show', 'led_matrix.show', ([], {}), '()\n', (13120, 13122), False, 'from rstem import led_matrix\n'), ((13158, 13173), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (13168, 13173), False, 'import time\n'), ((2351, 2370), 'rstem.led_matrix.height', 'led_matrix.height', ([], {}), '()\n', (2368, 2370), False, 'from rstem import led_matrix\n'), ((9148, 9165), 'RPi.GPIO.input', 'GPIO.input', (['RIGHT'], {}), '(RIGHT)\n', (9158, 9165), True, 'import RPi.GPIO as GPIO\n'), ((9235, 9250), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (9245, 9250), False, 'import time\n'), ((13268, 13286), 'rstem.led_matrix.width', 'led_matrix.width', ([], {}), '()\n', (13284, 13286), False, 'from rstem import led_matrix\n'), ((13503, 13524), 'random.choice', 'random.choice', (['SHAPES'], {}), '(SHAPES)\n', (13516, 13524), False, 'import random\n'), ((13553, 13574), 'random.choice', 'random.choice', (['SHAPES'], {}), '(SHAPES)\n', (13566, 13574), False, 'import random\n'), ((13666, 13684), 'rstem.led_matrix.erase', 'led_matrix.erase', ([], {}), '()\n', (13682, 13684), False, 'from rstem import led_matrix\n'), ((13729, 13746), 'rstem.led_matrix.show', 'led_matrix.show', ([], {}), '()\n', (13744, 13746), False, 'from rstem import led_matrix\n'), ((13799, 13813), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (13811, 13813), True, 'import RPi.GPIO as GPIO\n'), ((13822, 13842), 'rstem.led_matrix.cleanup', 'led_matrix.cleanup', ([], {}), '()\n', (13840, 13842), False, 'from rstem import led_matrix\n'), ((13851, 13862), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (13859, 13862), False, 'import sys\n'), ((13041, 13059), 'rstem.led_matrix.width', 
'led_matrix.width', ([], {}), '()\n', (13057, 13059), False, 'from rstem import led_matrix\n')] |
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright 2020 QandA-vietnam.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import logging
from elasticsearch import Elasticsearch
LOGGER = logging.getLogger(__name__)
es = Elasticsearch([{"host": "localhost", "port": 9200, "timeout": 60}])
default_settings = {
"index": {
"number_of_shards": 1,
"number_of_replicas": 1,
},
"analysis": {
"analyzer": {
"my_analyzer": {
"type": "custom",
"tokenizer": "vi_tokenizer",
"filter": [
"lowercase",
"vi_stop",
"asciifolding"
]
}
}
}
}
question_index_settings = {
"settings": default_settings,
"mappings": {
"properties": {
"id": {"type": "integer"},
"title": {"type": "text", "analyzer": "my_analyzer"},
"content": {"type": "text", "analyzer": "my_analyzer"},
"date_create": {"type": "date"},
"tags": {"type": "keyword"},
"owner_id": {"type": "keyword"},
}
}
}
answer_index_settings = {
"settings": default_settings,
"mappings": {
"properties": {
"id": {"type": "integer"},
"content": {"type": "text", "analyzer": "my_analyzer"},
"date_create": {"type": "date"},
"owner_id": {"type": "keyword"},
}
}
}
reputation_score_index_settings = {
"settings": default_settings,
"mappings": {
"properties": {
"points_received": {"type": "integer"},
"date_received": {"type": "date"},
"owner_id": {"type": "keyword"},
"username": {"type": "keyword"},
"type_received": {"type": "keyword"}
}
}
}
def init():
if not es.indices.exists(index='question'):
es.indices.create(index='question', body=question_index_settings)
if not es.indices.exists(index='answer'):
es.indices.create(index='answer', body=answer_index_settings)
if not es.indices.exists(index='reputation'):
es.indices.create(index='reputation', body=reputation_score_index_settings)
def question_index(question_id,
title,
content,
date_create,
tags,
owner_id):
"""
    Create a document and save it to Elasticsearch.
    The document's id will be set to question_id.
"""
body = {
"question_id": question_id,
"title": title,
"content": content,
"date_create": date_create,
"tags": tags,
"owner_id": owner_id
}
return index_document('question', document=body, doc_id=question_id)
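# A minimal usage sketch with hypothetical values (the id, date format and tag names below are
# assumptions for illustration, not data from the application):
#
#   question_index(question_id=42,
#                  title="How do I install Elasticsearch?",
#                  content="I need help installing Elasticsearch 7 on Ubuntu.",
#                  date_create="2020-05-01T10:15:00",
#                  tags=["elasticsearch", "ubuntu"],
#                  owner_id="user-123")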
def answer_index(answer_id,
content,
date_create,
owner_id):
"""
    Create a document and save it to Elasticsearch.
    The document's id will be set to answer_id.
"""
body = {
"answer_id": answer_id,
"content": content,
"date_create": date_create,
"owner_id": owner_id
}
return index_document(index_name='answer', document=body, doc_id=answer_id)
def reputation_index(points_received,
date_received,
owner_id,
username,
type_received,
**kwargs):
body = {
"points_received": points_received,
"date_received": date_received,
"owner_id": owner_id,
"username": username,
"type_received": type_received
}
body.update(kwargs)
return index_document(index_name='reputation', document=body)
def index_document(index_name, document, doc_id=None):
return es.index(index=index_name, body=document, id=doc_id)
def discuss_search(query, start, size, sort=None, tags=None):
scripts = {
"from": start,
"size": size,
"query": {
"bool": {
"must": {
"multi_match": {
"query": query,
"type": "most_fields",
"fields": ["title", "content"]
}
}
}
},
"highlight": {
"pre_tags": ["<b>"],
"post_tags": ["</b>"],
"fields": {
"content": {
"type": "plain",
"fragment_size": 300,
"number_of_fragments": 1
}
}
}
}
if tags:
scripts['query']['bool']['filter'] = {'terms': {'tags': tags}}
if sort:
if sort == "relevance":
scripts['sort'] = "_score"
elif sort == "newest":
scripts['sort'] = [{"date_create": {"order": "desc"}}, "_score"]
filter_path = ['hits.total', 'hits.hits', 'hits.hits._source.highlight',
'hits.hits._source._index', 'hits.hits._source._id']
return es.search(body=scripts, index=["question", "answer"], filter_path=filter_path)
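# A minimal usage sketch for discuss_search (hypothetical query and tags). The response follows
# the standard Elasticsearch search layout, so matched documents sit under ['hits']['hits']:
#
#   results = discuss_search("install elasticsearch", start=0, size=10,
#                            sort="newest", tags=["elasticsearch"])
#   for hit in results['hits']['hits']:
#       print(hit['_index'], hit['_id'], hit.get('highlight'))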
def question_update(question_id,
title,
content,
tags):
"""
    Partially update the document with id 'question_id' in the 'question' index.
"""
body = {
"script": {
"source": "ctx._source.title=params.title;"
"ctx._source.content=params.content;"
"ctx._source.tags=params.tags",
"params": {
"title": title,
"content": content,
"tags": tags
}
}
}
return es.update(index='question', id=question_id, body=body)
def answer_update(answer_id,
content):
"""
    Partially update the document with id 'answer_id' in the 'answer' index.
"""
body = {
"script": {
"source": "ctx._source.content=params.content;",
"params": {
"content": content
}
}
}
    return es.update(index='answer', id=answer_id, body=body)
def reputation_vote_update(points, vote_id):
body = {
"script": {
"source": "ctx._source.points_received=params.points;",
"params": {
"points": points
}
},
"query": {
"term": {
"vote_id": vote_id
}
}
}
return es.update_by_query(index='reputation', body=body)
def question_delete(question_id):
"""
    Delete the document with id 'question_id'.
"""
es.delete(index='question', id=question_id)
def answer_delete(answer_id):
"""
    Delete the document with id 'answer_id'.
"""
es.delete(index='answer', id=answer_id)
def reputation_delete(**kwargs):
"""
    Delete reputation documents matching the given field/value terms (delete-by-query).
"""
    # build the term filters as a list so that every keyword argument is applied
    # (a dict keyed on "term" would silently keep only the last filter)
    query = {
        "query": {
            "bool": {
                "must": []
            }
        }
    }
    for key in kwargs:
        query["query"]["bool"]["must"].append({"term": {key: kwargs[key]}})
es.delete_by_query(index="reputation", body=query)
from base_connection import Connection
import subprocess
import os
import re
import datetime
import pandas as pd
class Bc3(Connection):
def __init__(self, cluster_user_name, ssh_config_alias, path_to_key, forename_of_user, surname_of_user, user_email, base_output_path = '/panfs/panasas01/emat/oc13378/WholeCell/output', base_runfiles_path = '/panfs/panasas01/emat/oc13378/WholeCell/wc/mg/bc3/runFiles', wholecell_model_master_dir = '/panfs/panasas01/emat/oc13378/WholeCell/wc/mg/WholeCell-master'):
Connection.__init__(self, cluster_user_name, ssh_config_alias, path_to_key, forename_of_user, surname_of_user, user_email)
self.submit_command = 'qsub'
self.information_about_cluster = 'BlueCrystal Phase 3: Advanced Computing Research Centre, University of Bristol.'
self.base_output_path = base_output_path
self.base_runfiles_path = base_runfiles_path
self.wholecell_model_master_dir = wholecell_model_master_dir
self.activate_venv_list = ['module add languages/python-anaconda-4.2-3.5', 'source activate wholecell_modelling_suite']
self.path_to_flex1 = '/panfs/panasas01/bluegem-flex1'
self.path_to_database_dir = self.path_to_flex1
self.db_connection = self
	#instance methods
# def rsyncFile(self, source, destination, rsync_flags = "-aP"):
# super(bc3, self).rsyncFile(source, destination, rsync_flags)
#
# return
def getGeneInfo(self, tuple_of_genes):
raw_out = self.useStaticDbFunction([tuple_of_genes], 'CodeToInfo')
if raw_out[0] == 0:
as_list = eval(raw_out[1].strip().decode('ascii'))
list_of_column_names = ['code', 'type', 'name', 'symbol', 'functional_unit', 'deletion_phenotype', 'essential_in_model', 'essential_in_experiment']
dict_out = {list_of_column_names[name_idx]: [as_list[element_idx][name_idx] for element_idx in range(len(as_list))] for name_idx in range(len(list_of_column_names))}
else:
raise ValueError("Failed to retrieve sql data. Query returned: ", raw_out)
return dict_out
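	# A minimal usage sketch (hypothetical credentials and gene codes; note that a single gene
	# still needs a trailing comma to form a tuple). The returned dict maps each column name,
	# e.g. 'name' or 'essential_in_model', to a list with one entry per requested gene:
	#
	#   bc3 = Bc3('ab12345', 'bc3', '/home/ab12345/.ssh/id_rsa', 'Jane', 'Doe', 'jane@example.com')
	#   info = bc3.getGeneInfo(('MG_001', 'MG_006'))
	#   print(info['name'])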
def useStaticDbFunction(self, list_of_function_inputs, function_call, path_to_staticDb_stuff='/panfs/panasas01/bluegem-flex1/database/staticDB'):
"""Takes a tuple of gene codes and return info about the genes"""
add_anoconda_module = 'module add languages/python-anaconda-4.2-3.5'
activate_virtual_environment = 'source activate wholecell_modelling_suite'
change_to_lib_dir = 'cd ' + path_to_staticDb_stuff
get_data = 'python -c "from staticDB import io as sio;static_db_conn = sio();print(static_db_conn.' + function_call + '(' + ','.join(map(str, list_of_function_inputs)) + '))"'
cmd = "ssh " + self.ssh_config_alias + ";" + add_anoconda_module + ";" + activate_virtual_environment + ";" + change_to_lib_dir + ";" + get_data
cmd_list = ["ssh", self.ssh_config_alias, add_anoconda_module + ";" + activate_virtual_environment + ";" + change_to_lib_dir + ";" + get_data]
raw_out = Connection.getOutput(cmd_list)
return raw_out
def sendSqlToStaticDb(self, sql_command, path_to_staticDb_stuff='/panfs/panasas01/bluegem-flex1/database/staticDB'):
"""Takes an SQLITE3 command as a string and sends it to static.db and returns it the output."""
add_anoconda_module = 'module add languages/python-anaconda-4.2-3.5'
activate_virtual_environment = 'source activate wholecell_modelling_suite'
change_to_lib_dir = 'cd ' + path_to_staticDb_stuff
get_data = 'python -c "from staticDB import io as sio;static_db_conn = sio();print(static_db_conn.raw_sql_query(\'' + sql_command + '\'))"'
cmd = "ssh " + self.ssh_config_alias + ";" + add_anoconda_module + ";" + activate_virtual_environment + ";" + change_to_lib_dir + ";" + get_data
cmd_list = ["ssh", self.ssh_config_alias, add_anoconda_module + ";" + activate_virtual_environment + ";" + change_to_lib_dir + ";" + get_data]
raw_out = Connection.getOutput(cmd_list)
return raw_out
def convertGeneCodeToId(self, tuple_of_gene_codes, path_to_staticDb_stuff='/panfs/panasas01/bluegem-flex1/database/staticDB'):
"""Takes a tuple of genes code and returns a tuple of corresponding gene IDs."""
if type(tuple_of_gene_codes) is not tuple:
			raise TypeError("Gene codes must be a tuple (even if there is only one, e.g. single_tuple = ('MG_001',)); here type(tuple_of_gene_codes)=", type(tuple_of_gene_codes))
add_anoconda_module = 'module add languages/python-anaconda-4.2-3.5'
activate_virtual_environment = 'source activate wholecell_modelling_suite'
change_to_lib_dir = 'cd ' + path_to_staticDb_stuff
get_gene_id = 'python -c "from staticDB import io as sio;static_db_conn = sio();print(static_db_conn.CodeToId(' + str(tuple_of_gene_codes) + '))"'
cmd = "ssh " + self.ssh_config_alias + ";" + add_anoconda_module + ";" + activate_virtual_environment + ";" + change_to_lib_dir + ";" + get_gene_id
cmd_list = ["ssh", self.ssh_config_alias, add_anoconda_module + ";" + activate_virtual_environment + ";" + change_to_lib_dir + ";" + get_gene_id]
raw_out = Connection.getOutput(cmd_list)
# send command and get output
output = raw_out
output[1] = eval(str(output[1], "utf-8").rstrip())
# it doesn't output the answer in the order you input it so we need to make a dictionary
codeToId_dict = {}
for out in output[1]:
codeToId_dict[out[1]] = out[0]
return codeToId_dict
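	# Usage sketch (hypothetical IDs): bc3.convertGeneCodeToId(('MG_001', 'MG_006')) returns a
	# dict such as {'MG_001': 1, 'MG_006': 6}; the numeric IDs shown are purely illustrative.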
def checkQueue(self, job_number='NONE'):
"""This function takes a job number and returns a list of all the array numbers of that job still running."""
if job_number == 'NONE':
print("Warning: No job number given!")
grep_part_of_cmd = "qstat -tu " + self.user_name + " | grep \'" + str(job_number) + "\' | awk \'{print $1}\' | awk -F \"[][]\" \'{print $2}\'"
# cmd = ["ssh", self.ssh_config_alias, grep_part_of_cmd]
output_dict = self.sendCommand([grep_part_of_cmd])
return output_dict
def checkDiskUsage(self):
"""This function returns disk usage details."""
# create all the post connection commands needed
get_disk_usage_units_command = "pan_quota | awk \'{print $1}\' | tail -n 2 | head -n 1 | sed \'s/[<>]//g\'"
get_disk_usage_command = "pan_quota | awk \'{print $1}\' | tail -n 1"
get_disk_usage_soft_limit_command = "pan_quota | awk \'{print $2}\' | tail -n 1"
get_disk_usage_hard_limit_command = "pan_quota | awk \'{print $3}\' | tail -n 1"
		# combine the connection command with the post connection commands in a list (as is recommended).
units_cmd = ["ssh", self.ssh_config_alias, get_disk_usage_units_command]
usage_cmd = ["ssh", self.ssh_config_alias, get_disk_usage_command]
soft_limit_cmd = ["ssh", self.ssh_config_alias, get_disk_usage_soft_limit_command]
hard_limit_cmd = ["ssh", self.ssh_config_alias, get_disk_usage_hard_limit_command]
# send the commands and save the exit codes and outputs
units = Connection.getOutput(units_cmd)
usage = Connection.getOutput(usage_cmd)
soft_limit = Connection.getOutput(soft_limit_cmd)
hard_limit = Connection.getOutput(hard_limit_cmd)
		# convert string outputs to floats where necessary
units[1] = str(units[1], "utf-8").rstrip()
usage[1] = float(usage[1])
soft_limit[1] = float(soft_limit[1])
hard_limit[1] = float(hard_limit[1])
# print some stats
print(100 * (usage[1] / (1.0 * hard_limit[1]) ),"% of total disk space used.\n\n",hard_limit[1] - usage[1]," ",units[1]," left until hard limit.\n\n",soft_limit[1] - usage[1]," ",units[1]," left unit soft limit.", sep='')
return usage, soft_limit, hard_limit, units
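	# Usage sketch: usage, soft_limit, hard_limit, units = bc3.checkDiskUsage(). Each returned
	# item is the list produced by Connection.getOutput with element [1] already converted, so
	# usage[1], soft_limit[1] and hard_limit[1] are floats and units[1] is the unit string.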
def createStandardKoSubmissionScript(self, output_filename, pbs_job_name, no_of_unique_kos, path_and_name_of_unique_ko_dir_names, no_of_repetitions_of_each_ko, wholecell_model_master_dir, output_dir, path_and_name_of_ko_codes, outfile_name_and_path, errorfile_name_and_path):
import subprocess
# this shouldn't change but gonna leave it there just in case
queue_name = 'short'
		# set job array numbers to None so that we can check stuff has worked later
job_array_numbers = None
# The maximum job array size on BC3
max_job_array_size = 500
# initialise output dict
output_dict = {}
		# test that a reasonable number of jobs has been submitted (this is not a hard and fast rule but there has to be a max, and my intuition suggests that things will start to get complicated around this level, i.e. queueing and hard disk space etc.)
total_sims = no_of_unique_kos * no_of_repetitions_of_each_ko
if total_sims > 20000:
raise ValueError('Total amount of simulations for one batch submission must be less than 20,000, here total_sims=',total_sims)
output_dict['total_sims'] = total_sims
# spread simulations across array jobs
if no_of_unique_kos <= max_job_array_size:
no_of_unique_kos_per_array_job = 1
no_of_arrays = no_of_unique_kos
job_array_numbers = '1-' + str(no_of_unique_kos)
walltime = '30:00:00'
else:
# job_array_size * no_of_unique_kos_per_array_job = no_of_unique_kos so all the factors of no_of_unique_kos is
common_factors = [x for x in range(1, no_of_unique_kos+1) if no_of_unique_kos % x == 0]
# make the job_array_size as large as possible such that it is less than max_job_array_size
factor_idx = len(common_factors) - 1
while factor_idx >= 0:
if common_factors[factor_idx] < max_job_array_size:
job_array_numbers = '1-' + str(common_factors[factor_idx])
no_of_arrays = common_factors[factor_idx]
no_of_unique_kos_per_array_job = common_factors[(len(common_factors)-1) - factor_idx]
factor_idx = -1
else:
factor_idx -= 1
# raise error if no suitable factors found!
if job_array_numbers is None:
raise ValueError('job_array_numbers should have been assigned by now! This suggests that it wasn\'t possible for my algorithm to split the KOs across the job array properly. Here no_of_unique_kos=', no_of_unique_kos, ' and the common factors of this number are:', common_factors)
			# add some time to the walltime because I don't think the jobs have to start at the same time
walltime = '35:00:00'
output_dict['no_of_arrays'] = no_of_arrays
output_dict['no_of_unique_kos_per_array_job'] = no_of_unique_kos_per_array_job
output_dict['no_of_repetitions_of_each_ko'] = no_of_repetitions_of_each_ko
# calculate the amount of cores per array job - NOTE: for simplification we only use cores and not nodes (this is generally the fastest way to get through the queue anyway)
no_of_cores = no_of_repetitions_of_each_ko * no_of_unique_kos_per_array_job
output_dict['no_of_sims_per_array_job'] = no_of_cores
output_dict['list_of_rep_dir_names'] = list(range(1, no_of_repetitions_of_each_ko + 1))
no_of_nodes = 1
# write the script to file
with open(output_filename, mode='wt', encoding='utf-8') as myfile:
myfile.write("#!/bin/bash" + "\n")
myfile.write("\n")
myfile.write("# This script was automatically created by <NAME>'s whole-cell modelling suite. Please contact on <EMAIL>" + "\n")
myfile.write("# Title: " + pbs_job_name + "\n")
myfile.write("# User: " + self.forename_of_user + ", " + self.surename_of_user + ", " + self.user_email + "\n")
myfile.write("# Affiliation: Minimal Genome Group, Life Sciences, University of Bristol " + "\n")
myfile.write("# Last Updated: " + str(datetime.datetime.now()) + "\n")
myfile.write("\n")
myfile.write("# BC3: 223 base blades which have 16 x 2.6 GHz SandyBridge cores, 4GB/core and a 1TB SATA disk." + "\n")
myfile.write("\n")
myfile.write("## Job name" + "\n")
myfile.write("#PBS -N " + pbs_job_name + "\n")
myfile.write("\n")
myfile.write("## Resource request" + "\n")
myfile.write("#PBS -l nodes=" + str(no_of_nodes) + ":ppn=" + str(no_of_cores) + ",walltime=" + walltime + "\n")
myfile.write("#PBS -q " + queue_name + "\n")
myfile.write("\n")
myfile.write("## Job array request" + "\n")
myfile.write("#PBS -t " + job_array_numbers + "\n")
myfile.write("\n")
myfile.write("## designate output and error files" + "\n")
myfile.write("#PBS -e " + outfile_name_and_path + "\n")
myfile.write("#PBS -o " + errorfile_name_and_path + "\n")
myfile.write("\n")
myfile.write("# print some details about the job" + "\n")
myfile.write('echo "The Array ID is: ${PBS_ARRAYID}"' + "\n")
myfile.write('echo Running on host `hostname`' + "\n")
myfile.write('echo Time is `date`' + "\n")
myfile.write('echo Directory is `pwd`' + "\n")
myfile.write('echo PBS job ID is ${PBS_JOBID}' + "\n")
myfile.write('echo This job runs on the following nodes:' + "\n")
myfile.write('echo `cat $PBS_NODEFILE | uniq`' + "\n")
myfile.write("\n")
myfile.write("# load required modules" + "\n")
myfile.write("module unload apps/matlab-r2013b" + "\n")
myfile.write("module load apps/matlab-r2013a" + "\n")
myfile.write('echo "Modules loaded:"' + "\n")
myfile.write("module list" + "\n")
myfile.write("\n")
myfile.write("# create the master directory variable" + "\n")
myfile.write("master=" + wholecell_model_master_dir + "\n")
myfile.write("\n")
myfile.write("# create output directory" + "\n")
myfile.write("base_outDir=" + output_dir + "\n")
myfile.write("\n")
myfile.write("# collect the KO combos" + "\n")
myfile.write("ko_list=" + path_and_name_of_ko_codes + "\n")
myfile.write("ko_dir_names=" + path_and_name_of_unique_ko_dir_names + "\n")
myfile.write("\n")
myfile.write("# Get all the gene KOs and output folder names" + "\n")
myfile.write('for i in `seq 1 ' + str(no_of_unique_kos_per_array_job) + '`' + "\n")
myfile.write('do' + "\n")
myfile.write(' Gene[${i}]=$(awk NR==$((' + str(no_of_unique_kos_per_array_job) + '*(${PBS_ARRAYID}-1)+${i})) ${ko_list})' + "\n")
myfile.write(' unique_ko_dir_name[${i}]=$(awk NR==$((' + str(no_of_unique_kos_per_array_job) + '*(${PBS_ARRAYID}-1)+${i})) ${ko_dir_names})' + "\n")
myfile.write("done" + "\n")
myfile.write("\n")
myfile.write("# go to master directory" + "\n")
myfile.write("cd ${master}" + "\n")
myfile.write("\n")
myfile.write("# NB have limited MATLAB to a single thread" + "\n")
myfile.write('options="-nodesktop -noFigureWindows -nosplash -singleCompThread"' + "\n")
myfile.write("\n")
myfile.write("# run 16 simulations in parallel")
myfile.write('echo "Running simulations (single threaded) in parallel - let\'s start the timer!"' + "\n")
myfile.write('start=`date +%s`' + "\n")
myfile.write("\n")
myfile.write("# create all the directories for the diarys (the normal output will be all mixed up cause it's in parrallel!)" + "\n")
myfile.write('for i in `seq 1 ' + str(no_of_unique_kos_per_array_job) + '`' + "\n")
myfile.write("do" + "\n")
myfile.write(' for j in `seq 1 ' + str(no_of_repetitions_of_each_ko) + '`' + "\n")
myfile.write(" do" + "\n")
myfile.write(' specific_ko="$(echo ${Gene[${i}]} | sed \'s/{//g\' | sed \'s/}//g\' | sed \"s/\'//g\" | sed \'s/\"//g\' | sed \'s/,/-/g\')/${j}"' + "\n")
myfile.write(' mkdir -p ${base_outDir}/${unique_ko_dir_name[${i}]}/diary${j}' + "\n")
myfile.write(' matlab ${options} -r "diary(\'${base_outDir}/${unique_ko_dir_name[${i}]}/diary${j}/diary.out\');addpath(\'${master}\');setWarnings();setPath();runSimulation(\'runner\',\'koRunner\',\'logToDisk\',true,\'outDir\',\'${base_outDir}/${unique_ko_dir_name[${i}]}/${j}\',\'jobNumber\',$((no_of_repetitions_of_each_ko*no_of_unique_kos_per_array_job*(${PBS_ARRAYID}-1)+no_of_unique_kos_per_array_job*(${i}-1)+${j})),\'koList\',{{${Gene[${i}]}}});diary off;exit;" &' + "\n")
myfile.write(" done" + "\n")
myfile.write("done" + "\n")
myfile.write("wait" + "\n")
myfile.write("\n")
myfile.write("end=`date +%s`" + "\n")
myfile.write("runtime=$((end-start))" + "\n")
myfile.write('echo "$((${no_of_unique_kos_per_array_job}*${no_of_repetitions_of_each_ko})) simulations took: ${runtime} seconds."')
# give the file execute permissions
subprocess.check_call(["chmod", "700", str(output_filename)])
return output_dict
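	# A minimal usage sketch (all paths, names and sizes are hypothetical). The method writes a
	# PBS array-job submission script to output_filename and returns a dict describing how the
	# knockout simulations were spread across the job array:
	#
	#   details = bc3.createStandardKoSubmissionScript(
	#       '/panfs/panasas01/emat/ab12345/runFiles/submit_kos.sh', 'ko_batch_1', 100,
	#       '/panfs/panasas01/emat/ab12345/runFiles/ko_dir_names.txt', 4,
	#       bc3.wholecell_model_master_dir, bc3.base_output_path,
	#       '/panfs/panasas01/emat/ab12345/runFiles/ko_codes.txt',
	#       '/panfs/panasas01/emat/ab12345/runFiles/ko_batch_1.out',
	#       '/panfs/panasas01/emat/ab12345/runFiles/ko_batch_1.err')
	#   print(details['no_of_arrays'], details['no_of_sims_per_array_job'])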
def getJobIdFromSubStdOut(self, stdout):
return int(re.search(r'\d+', stdout).group())
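	# For example (hypothetical qsub output), getJobIdFromSubStdOut('1234567[].master.cm.cluster')
	# returns 1234567, since only the first run of digits in the string is kept.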
class Bg(Connection):
def __init__(self, cluster_user_name, ssh_config_alias, path_to_key, forename_of_user, surname_of_user, user_email, base_output_path = '/projects/flex1/database/wcm_suite/output', base_runfiles_path = '/projects/flex1/database/wcm_suite/runFiles', wholecell_model_master_dir = '/panfs/panasas01/emat/oc13378/WholeCell/wc/mg/WholeCell-master'):
Connection.__init__(self, cluster_user_name, ssh_config_alias, path_to_key, forename_of_user, surname_of_user, user_email)
self.submit_command = 'sbatch'
self.information_about_cluster = 'BlueGem: BrisSynBio, University of Bristol.'
self.base_output_path = base_output_path
self.base_runfiles_path = base_runfiles_path
self.wholecell_model_master_dir = wholecell_model_master_dir
self.activate_venv_list = ['module add apps/anaconda3-2.3.0', 'source activate whole_cell_modelling_suite']
self.path_to_flex1 = '/projects/flex1'
self.path_to_database_dir = self.path_to_flex1
self.db_connection = self
	#instance methods
# def rsyncFile(self, source, destination, rsync_flags = "-aP"):
# super(bc3, self).rsyncFile(source, destination, rsync_flags)
#
# return
def convertGeneCodeToId(self, tuple_of_gene_codes, path_to_staticDb_stuff='/projects/flex1/database/staticDB'):
"""Takes a tuple of genes code and returns a tuple of corresponding gene IDs."""
if type(tuple_of_gene_codes) is not tuple:
			raise TypeError("Gene codes must be a tuple (even if there is only one, e.g. single_tuple = ('MG_001',)); here type(tuple_of_gene_codes)=", type(tuple_of_gene_codes))
add_anoconda_module = 'module add apps/anaconda3-2.3.0'
activate_virtual_environment = 'source activate whole_cell_modelling_suite'
change_to_lib_dir = 'cd ' + path_to_staticDb_stuff
get_gene_id = 'python -c "from staticDB import io as sio;static_db_conn = sio();print(static_db_conn.CodeToId(' + str(tuple_of_gene_codes) + '))"'
cmd = "ssh " + self.ssh_config_alias + ";" + add_anoconda_module + ";" + activate_virtual_environment + ";" + change_to_lib_dir + ";" + get_gene_id
cmd_list = ["ssh", self.ssh_config_alias, add_anoconda_module + ";" + activate_virtual_environment + ";" + change_to_lib_dir + ";" + get_gene_id]
raw_out = Connection.getOutput(cmd_list)
# send command and get output
output = raw_out
output[1] = eval(str(output[1], "utf-8").rstrip())
# it doesn't output the answer in the order you input it so we need to make a dictionary
codeToId_dict = {}
for out in output[1]:
codeToId_dict[out[1]] = out[0]
return codeToId_dict
def checkQueue(self, job_number):
"""This function takes a job number and returns a list of all the array numbers of that job still running."""
grep_part_of_cmd = "squeue -ru " + self.user_name + " | grep \'" + str(job_number) + "\' | awk \'{print $1}\' | awk -F \"_\" \'{print $2}\'"
# cmd = ["ssh", self.ssh_config_alias, grep_part_of_cmd]
output_dict = self.checkSuccess(self.sendCommand, [grep_part_of_cmd])
return output_dict
def checkDiskUsage(self):
"""This function returns disk usage details."""
# create all the post connection commands needed
get_disk_usage_units_command = "pan_quota | awk \'{print $1}\' | tail -n 2 | head -n 1 | sed \'s/[<>]//g\'"
get_disk_usage_command = "pan_quota | awk \'{print $1}\' | tail -n 1"
get_disk_usage_soft_limit_command = "pan_quota | awk \'{print $2}\' | tail -n 1"
get_disk_usage_hard_limit_command = "pan_quota | awk \'{print $3}\' | tail -n 1"
		# combine the connection command with the post connection commands in a list (as is recommended).
units_cmd = ["ssh", self.ssh_config_alias, get_disk_usage_units_command]
usage_cmd = ["ssh", self.ssh_config_alias, get_disk_usage_command]
soft_limit_cmd = ["ssh", self.ssh_config_alias, get_disk_usage_soft_limit_command]
hard_limit_cmd = ["ssh", self.ssh_config_alias, get_disk_usage_hard_limit_command]
# send the commands and save the exit codes and outputs
units = Connection.getOutput(units_cmd)
usage = Connection.getOutput(usage_cmd)
soft_limit = Connection.getOutput(soft_limit_cmd)
hard_limit = Connection.getOutput(hard_limit_cmd)
		# convert string outputs to floats where necessary
units[1] = str(units[1], "utf-8").rstrip()
usage[1] = float(usage[1])
soft_limit[1] = float(soft_limit[1])
hard_limit[1] = float(hard_limit[1])
# print some stats
print(100 * (usage[1] / (1.0 * hard_limit[1]) ),"% of total disk space used.\n\n",hard_limit[1] - usage[1]," ",units[1]," left until hard limit.\n\n",soft_limit[1] - usage[1]," ",units[1]," left unit soft limit.", sep='')
return usage, soft_limit, hard_limit, units
def createStandardKoSubmissionScript(self, output_filename, slurm_job_name, no_of_unique_kos, path_and_name_of_unique_ko_dir_names, no_of_repetitions_of_each_ko, wholecell_model_master_dir, output_dir, path_and_name_of_ko_codes, outfile_name_and_path, errorfile_name_and_path):
# this shouldn't change but gonna leave it there just in case
queue_name = 'cpu'
		# set job array numbers to None so that we can check stuff has worked later
job_array_numbers = None
		# The maximum job array size on this cluster
max_job_array_size = 200
# initialise output dict
output_dict = {}
		# test that a reasonable number of jobs has been submitted (this is not a hard and fast rule but there has to be a max, and my intuition suggests that things will start to get complicated around this level, i.e. queueing and hard disk space etc.)
total_sims = no_of_unique_kos * no_of_repetitions_of_each_ko
if total_sims > 20000:
raise ValueError('Total amount of simulations for one batch submission must be less than 20,000, here total_sims=',total_sims)
output_dict['total_sims'] = total_sims
# spread simulations across array jobs
if no_of_unique_kos <= max_job_array_size:
no_of_unique_kos_per_array_job = 1
no_of_arrays = no_of_unique_kos
job_array_numbers = '1-' + str(no_of_unique_kos)
walltime = '0-30:00:00'
else:
# job_array_size * no_of_unique_kos_per_array_job = no_of_unique_kos so all the factors of no_of_unique_kos is
common_factors = [x for x in range(1, no_of_unique_kos+1) if no_of_unique_kos % x == 0]
# make the job_array_size as large as possible such that it is less than max_job_array_size
factor_idx = len(common_factors) - 1
while factor_idx >= 0:
if common_factors[factor_idx] < max_job_array_size:
job_array_numbers = '1-' + str(common_factors[factor_idx])
no_of_arrays = common_factors[factor_idx]
no_of_unique_kos_per_array_job = common_factors[(len(common_factors)-1) - factor_idx]
factor_idx = -1
else:
factor_idx -= 1
# raise error if no suitable factors found!
if job_array_numbers is None:
raise ValueError('job_array_numbers should have been assigned by now! This suggests that it wasn\'t possible for my algorithm to split the KOs across the job array properly. Here no_of_unique_kos=', no_of_unique_kos, ' and the common factors of this number are:', common_factors)
			# add some time to the walltime because I don't think the jobs have to start at the same time
walltime = '0-35:00:00'
output_dict['no_of_arrays'] = no_of_arrays
output_dict['no_of_unique_kos_per_array_job'] = no_of_unique_kos_per_array_job
output_dict['no_of_repetitions_of_each_ko'] = no_of_repetitions_of_each_ko
# calculate the amount of cores per array job - NOTE: for simplification we only use cores and not nodes (this is generally the fastest way to get through the queue anyway)
no_of_cores = no_of_repetitions_of_each_ko * no_of_unique_kos_per_array_job
output_dict['no_of_sims_per_array_job'] = no_of_cores
output_dict['list_of_rep_dir_names'] = list(range(1, no_of_repetitions_of_each_ko + 1))
no_of_nodes = 1
# write the script to file
with open(output_filename, mode='wt', encoding='utf-8') as myfile:
myfile.write("#!/bin/bash -login" + "\n")
myfile.write("\n")
myfile.write("# This script was automatically created by <NAME>'s whole-cell modelling suite and is based on scripts that he created." + "\n")
myfile.write("\n")
myfile.write("## Job name" + "\n")
myfile.write("#SBATCH --job-name=" + slurm_job_name + "\n")
myfile.write("\n")
myfile.write("## What account the simulations are registered to" + "\n")
myfile.write("#SBATCH -A Flex1" + "\n")
myfile.write("\n")
myfile.write("## Resource request" + "\n")
# myfile.write("#SBATCH -n " + str(no_of_nodes) + "\n")
myfile.write("#SBATCH --ntasks=" + str(no_of_cores) + " # No. of cores\n")
myfile.write("#SBATCH --time=" + walltime + "\n")
myfile.write("#SBATCH -p " + queue_name + "\n")
myfile.write("\n")
myfile.write("## Job array request" + "\n")
myfile.write("#SBATCH --array=" + job_array_numbers + "\n")
myfile.write("\n")
myfile.write("## designate output and error files" + "\n")
myfile.write("#SBATCH --output=" + outfile_name_and_path + "_%A_%a.out" + "\n")
myfile.write("#SBATCH --error=" + errorfile_name_and_path + "_%A_%a.err" + "\n")
myfile.write("\n")
myfile.write("# print some details about the job" + "\n")
myfile.write('echo "The Array TASK ID is: ${SLURM_ARRAY_TASK_ID}"' + "\n")
myfile.write('echo "The Array JOB ID is: ${SLURM_ARRAY_JOB_ID}"' + "\n")
myfile.write('echo Running on host `hostname`' + "\n")
myfile.write('echo Time is `date`' + "\n")
myfile.write('echo Directory is `pwd`' + "\n")
myfile.write("\n")
myfile.write("# load required modules" + "\n")
myfile.write("module load apps/matlab-r2013a" + "\n")
myfile.write('echo "Modules loaded:"' + "\n")
myfile.write("module list" + "\n")
myfile.write("\n")
myfile.write("# create the master directory variable" + "\n")
myfile.write("master=" + wholecell_model_master_dir + "\n")
myfile.write("\n")
myfile.write("# create output directory" + "\n")
myfile.write("base_outDir=" + output_dir + "\n")
myfile.write("\n")
myfile.write("# collect the KO combos" + "\n")
myfile.write("ko_list=" + path_and_name_of_ko_codes + "\n")
myfile.write("ko_dir_names=" + path_and_name_of_unique_ko_dir_names + "\n")
myfile.write("\n")
myfile.write("# Get all the gene KOs and output folder names" + "\n")
myfile.write('for i in `seq 1 ' + str(no_of_unique_kos_per_array_job) + '`' + "\n")
myfile.write('do' + "\n")
myfile.write(' Gene[${i}]=$(awk NR==$((' + str(no_of_unique_kos_per_array_job) + '*(${SLURM_ARRAY_TASK_ID}-1)+${i})) ${ko_list})' + "\n")
myfile.write(' unique_ko_dir_name[${i}]=$(awk NR==$((' + str(no_of_unique_kos_per_array_job) + '*(${SLURM_ARRAY_TASK_ID}-1)+${i})) ${ko_dir_names})' + "\n")
myfile.write("done" + "\n")
myfile.write("\n")
myfile.write("# go to master directory" + "\n")
myfile.write("cd ${master}" + "\n")
myfile.write("\n")
myfile.write("# NB have limited MATLAB to a single thread" + "\n")
myfile.write('options="-nodesktop -noFigureWindows -nosplash -singleCompThread"' + "\n")
myfile.write("\n")
myfile.write("# run " + str(no_of_unique_kos_per_array_job * no_of_repetitions_of_each_ko) + " simulations in parallel")
myfile.write('echo "Running simulations (single threaded) in parallel - let\'s start the timer!"' + "\n")
myfile.write('start=`date +%s`' + "\n")
myfile.write("\n")
myfile.write("# create all the directories for the diarys (the normal output will be all mixed up cause it's in parrallel!)" + "\n")
myfile.write('for i in `seq 1 ' + str(no_of_unique_kos_per_array_job) + '`' + "\n")
myfile.write("do" + "\n")
myfile.write(' for j in `seq 1 ' + str(no_of_repetitions_of_each_ko) + '`' + "\n")
myfile.write(" do" + "\n")
myfile.write(' specific_ko="$(echo ${Gene[${i}]} | sed \'s/{//g\' | sed \'s/}//g\' | sed \"s/\'//g\" | sed \'s/\"//g\' | sed \'s/,/-/g\')/${j}"' + "\n")
myfile.write(' mkdir -p ${base_outDir}/${unique_ko_dir_name[${i}]}/diary${j}' + "\n")
myfile.write(' matlab ${options} -r "diary(\'${base_outDir}/${unique_ko_dir_name[${i}]}/diary${j}/diary.out\');addpath(\'${master}\');setWarnings();setPath();runSimulation(\'runner\',\'koRunner\',\'logToDisk\',true,\'outDir\',\'${base_outDir}/${unique_ko_dir_name[${i}]}/${j}\',\'jobNumber\',$((no_of_repetitions_of_each_ko*no_of_unique_kos_per_array_job*(${SLURM_ARRAY_TASK_ID}-1)+no_of_unique_kos_per_array_job*(${i}-1)+${j})),\'koList\',{{${Gene[${i}]}}});diary off;exit;" &' + "\n")
myfile.write(" done" + "\n")
myfile.write("done" + "\n")
myfile.write("wait" + "\n")
myfile.write("\n")
myfile.write("end=`date +%s`" + "\n")
myfile.write("runtime=$((end-start))" + "\n")
myfile.write('echo "$((${no_of_unique_kos_per_array_job}*${no_of_repetitions_of_each_ko})) simulations took: ${runtime} seconds."')
# give the file execute permissions
subprocess.check_call(["chmod", "700", str(output_filename)])
return output_dict
def getJobIdFromSubStdOut(self, stdout):
return int(re.search(r'\d+', stdout).group())
class C3ddb(Connection):
def __init__(self, cluster_user_name, ssh_config_alias, path_to_key, forename_of_user, surname_of_user, user_email, base_output_path = '/scratch/users/ochalkley/wc/mg/output', base_runfiles_path = '/home/ochalkley/WholeCell/github/wc/mg/mit/runFiles', wholecell_model_master_dir = '/home/ochalkley/WholeCell/github/wc/mg/WholeCell-master'):
Connection.__init__(self, cluster_user_name, ssh_config_alias, path_to_key, forename_of_user, surname_of_user, user_email)
self.submit_command = 'sbatch'
self.information_about_cluster = 'Commonwealth Computational Cloud for Data Driven Biology, Massachusetts Green High Performance Computer Center'
self.base_output_path = base_output_path
self.base_runfiles_path = base_runfiles_path
self.wholecell_model_master_dir = wholecell_model_master_dir
self.activate_venv_list = ['source activate wholecell_modelling_suite_py36']
# create Bc3 connection to read/write with databases on flex1
from connections import Bc3
bc3_conn = Bc3('oc13378', 'bc3', '/users/oc13378/.ssh/uob/uob-rsa', 'Oliver', 'Chalkley', '<EMAIL>')
self.db_connection = bc3_conn
self.path_to_database_dir = '/home/ochalkley/WholeCell/github/wholecell_modelling_suite'
	#instance methods
# def rsyncFile(self, source, destination, rsync_flags = "-aP"):
# super(bc3, self).rsyncFile(source, destination, rsync_flags)
#
# return
def convertGeneCodeToId(self, tuple_of_gene_codes, path_to_staticDb_stuff='/panfs/panasas01/bluegem-flex1/database/staticDB'):
"""Takes a tuple of genes code and returns a tuple of corresponding gene IDs."""
if type(tuple_of_gene_codes) is not tuple:
			raise TypeError("Gene codes must be a tuple (even if there is only one, e.g. single_tuple = ('MG_001',)); here type(tuple_of_gene_codes)=", type(tuple_of_gene_codes))
add_anoconda_module = 'module add languages/python-anaconda-4.2-3.5'
activate_virtual_environment = 'source activate wholecell_modelling_suite'
change_to_lib_dir = 'cd ' + path_to_staticDb_stuff
get_gene_id = 'python -c "from staticDB import io as sio;static_db_conn = sio();print(static_db_conn.CodeToId(' + str(tuple_of_gene_codes) + '))"'
cmd = "ssh " + self.db_connection.ssh_config_alias + ";" + add_anoconda_module + ";" + activate_virtual_environment + ";" + change_to_lib_dir + ";" + get_gene_id
cmd_list = ["ssh", self.db_connection.ssh_config_alias, add_anoconda_module + ";" + activate_virtual_environment + ";" + change_to_lib_dir + ";" + get_gene_id]
raw_out = self.db_connection.getOutput(cmd_list)
# send command and get output
output = raw_out
output[1] = eval(str(output[1], "utf-8").rstrip())
# it doesn't output the answer in the order you input it so we need to make a dictionary
codeToId_dict = {}
for out in output[1]:
codeToId_dict[out[1]] = out[0]
return codeToId_dict
def checkQueue(self, job_number):
"""This function takes a job number and returns a list of all the array numbers of that job still running."""
grep_part_of_cmd = "squeue -ru " + self.user_name + " | grep \'" + str(job_number) + "\' | awk \'{print $1}\' | awk -F \"_\" \'{print $2}\'"
# cmd = ["ssh", self.ssh_config_alias, grep_part_of_cmd]
output_dict = self.checkSuccess(self.sendCommand, [grep_part_of_cmd])
return output_dict
def checkDiskUsage(self):
"""This function returns disk usage details."""
# create all the post connection commands needed
get_disk_usage_units_command = "pan_quota | awk \'{print $1}\' | tail -n 2 | head -n 1 | sed \'s/[<>]//g\'"
get_disk_usage_command = "pan_quota | awk \'{print $1}\' | tail -n 1"
get_disk_usage_soft_limit_command = "pan_quota | awk \'{print $2}\' | tail -n 1"
get_disk_usage_hard_limit_command = "pan_quota | awk \'{print $3}\' | tail -n 1"
		# combine the connection command with the post connection commands in a list (as is recommended).
units_cmd = ["ssh", self.ssh_config_alias, get_disk_usage_units_command]
usage_cmd = ["ssh", self.ssh_config_alias, get_disk_usage_command]
soft_limit_cmd = ["ssh", self.ssh_config_alias, get_disk_usage_soft_limit_command]
hard_limit_cmd = ["ssh", self.ssh_config_alias, get_disk_usage_hard_limit_command]
# send the commands and save the exit codes and outputs
units = Connection.getOutput(units_cmd)
usage = Connection.getOutput(usage_cmd)
soft_limit = Connection.getOutput(soft_limit_cmd)
hard_limit = Connection.getOutput(hard_limit_cmd)
		# convert string outputs to floats where necessary
units[1] = str(units[1], "utf-8").rstrip()
usage[1] = float(usage[1])
soft_limit[1] = float(soft_limit[1])
hard_limit[1] = float(hard_limit[1])
# print some stats
print(100 * (usage[1] / (1.0 * hard_limit[1]) ),"% of total disk space used.\n\n",hard_limit[1] - usage[1]," ",units[1]," left until hard limit.\n\n",soft_limit[1] - usage[1]," ",units[1]," left unit soft limit.", sep='')
return usage, soft_limit, hard_limit, units
def createStandardKoSubmissionScript(self, output_filename, slurm_job_name, no_of_unique_kos, path_and_name_of_unique_ko_dir_names, no_of_repetitions_of_each_ko, wholecell_model_master_dir, output_dir, path_and_name_of_ko_codes, outfile_name_and_path, errorfile_name_and_path):
# this shouldn't change but gonna leave it there just in case
queue_name = 'defq'
		# set job array numbers to None so that we can check stuff has worked later
job_array_numbers = None
		# The maximum job array size on this cluster
max_job_array_size = 200
# initialise output dict
output_dict = {}
		# test that a reasonable number of jobs has been submitted (this is not a hard and fast rule but there has to be a max, and my intuition suggests that things will start to get complicated around this level, i.e. queueing and hard disk space etc.)
total_sims = no_of_unique_kos * no_of_repetitions_of_each_ko
if total_sims > 20000:
raise ValueError('Total amount of simulations for one batch submission must be less than 20,000, here total_sims=',total_sims)
output_dict['total_sims'] = total_sims
# spread simulations across array jobs
if no_of_unique_kos <= max_job_array_size:
no_of_unique_kos_per_array_job = 1
no_of_arrays = no_of_unique_kos
job_array_numbers = '1-' + str(no_of_unique_kos)
walltime = '0-30:00:00'
else:
# job_array_size * no_of_unique_kos_per_array_job = no_of_unique_kos so all the factors of no_of_unique_kos is
common_factors = [x for x in range(1, no_of_unique_kos+1) if no_of_unique_kos % x == 0]
# make the job_array_size as large as possible such that it is less than max_job_array_size
factor_idx = len(common_factors) - 1
while factor_idx >= 0:
if common_factors[factor_idx] < max_job_array_size:
job_array_numbers = '1-' + str(common_factors[factor_idx])
no_of_arrays = common_factors[factor_idx]
no_of_unique_kos_per_array_job = common_factors[(len(common_factors)-1) - factor_idx]
factor_idx = -1
else:
factor_idx -= 1
# raise error if no suitable factors found!
if job_array_numbers is None:
raise ValueError('job_array_numbers should have been assigned by now! This suggests that it wasn\'t possible for my algorithm to split the KOs across the job array properly. Here no_of_unique_kos=', no_of_unique_kos, ' and the common factors of this number are:', common_factors)
			# add some time to the walltime because I don't think the jobs have to start at the same time
walltime = '0-35:00:00'
output_dict['no_of_arrays'] = no_of_arrays
output_dict['no_of_unique_kos_per_array_job'] = no_of_unique_kos_per_array_job
output_dict['no_of_repetitions_of_each_ko'] = no_of_repetitions_of_each_ko
# calculate the amount of cores per array job - NOTE: for simplification we only use cores and not nodes (this is generally the fastest way to get through the queue anyway)
no_of_cores = no_of_repetitions_of_each_ko * no_of_unique_kos_per_array_job
output_dict['no_of_sims_per_array_job'] = no_of_cores
output_dict['list_of_rep_dir_names'] = list(range(1, no_of_repetitions_of_each_ko + 1))
no_of_nodes = 1
# write the script to file
with open(output_filename, mode='wt', encoding='utf-8') as myfile:
myfile.write("#!/bin/bash" + "\n")
myfile.write("\n")
myfile.write("# This script was automatically created by <NAME>'s whole-cell modelling suite and is based on scripts that he created." + "\n")
myfile.write("\n")
myfile.write("## Job name" + "\n")
myfile.write("#SBATCH --job-name=" + slurm_job_name + "\n")
myfile.write("\n")
myfile.write("## Resource request" + "\n")
# myfile.write("#SBATCH -n " + str(no_of_nodes) + "\n")
myfile.write("#SBATCH --ntasks=" + str(no_of_cores) + " # No. of cores\n")
myfile.write("#SBATCH --time=" + walltime + "\n")
myfile.write("#SBATCH -p " + queue_name + "\n")
myfile.write("#SBATCH --mem-per-cpu=10000" + "\n")
myfile.write("\n")
myfile.write("## Job array request" + "\n")
myfile.write("#SBATCH --array=" + job_array_numbers + "\n")
myfile.write("\n")
myfile.write("## designate output and error files" + "\n")
myfile.write("#SBATCH --output=" + outfile_name_and_path + "_%A_%a.out" + "\n")
myfile.write("#SBATCH --error=" + errorfile_name_and_path + "_%A_%a.err" + "\n")
myfile.write("\n")
myfile.write("# print some details about the job" + "\n")
myfile.write('echo "The Array TASK ID is: ${SLURM_ARRAY_TASK_ID}"' + "\n")
myfile.write('echo "The Array JOB ID is: ${SLURM_ARRAY_JOB_ID}"' + "\n")
myfile.write('echo Running on host `hostname`' + "\n")
myfile.write('echo Time is `date`' + "\n")
myfile.write('echo Directory is `pwd`' + "\n")
myfile.write("\n")
myfile.write("# load required modules" + "\n")
myfile.write("module load mit/matlab/2013a" + "\n")
myfile.write('echo "Modules loaded:"' + "\n")
myfile.write("module list" + "\n")
myfile.write("\n")
myfile.write("# create the master directory variable" + "\n")
myfile.write("master=" + wholecell_model_master_dir + "\n")
myfile.write("\n")
myfile.write("# create output directory" + "\n")
myfile.write("base_outDir=" + output_dir + "\n")
myfile.write("\n")
myfile.write("# collect the KO combos" + "\n")
myfile.write("ko_list=" + path_and_name_of_ko_codes + "\n")
myfile.write("ko_dir_names=" + path_and_name_of_unique_ko_dir_names + "\n")
myfile.write("\n")
myfile.write("# Get all the gene KOs and output folder names" + "\n")
myfile.write('for i in `seq 1 ' + str(no_of_unique_kos_per_array_job) + '`' + "\n")
myfile.write('do' + "\n")
myfile.write(' Gene[${i}]=$(awk NR==$((' + str(no_of_unique_kos_per_array_job) + '*(${SLURM_ARRAY_TASK_ID}-1)+${i})) ${ko_list})' + "\n")
myfile.write(' unique_ko_dir_name[${i}]=$(awk NR==$((' + str(no_of_unique_kos_per_array_job) + '*(${SLURM_ARRAY_TASK_ID}-1)+${i})) ${ko_dir_names})' + "\n")
myfile.write("done" + "\n")
myfile.write("\n")
myfile.write("# go to master directory" + "\n")
myfile.write("cd ${master}" + "\n")
myfile.write("\n")
myfile.write("# NB have limited MATLAB to a single thread" + "\n")
myfile.write('options="-nodesktop -noFigureWindows -nosplash -singleCompThread"' + "\n")
myfile.write("\n")
myfile.write("# run " + str(no_of_unique_kos_per_array_job * no_of_repetitions_of_each_ko) + " simulations in parallel")
myfile.write('echo "Running simulations (single threaded) in parallel - let\'s start the timer!"' + "\n")
myfile.write('start=`date +%s`' + "\n")
myfile.write("\n")
myfile.write("# create all the directories for the diarys (the normal output will be all mixed up cause it's in parrallel!)" + "\n")
myfile.write('for i in `seq 1 ' + str(no_of_unique_kos_per_array_job) + '`' + "\n")
myfile.write("do" + "\n")
myfile.write(' for j in `seq 1 ' + str(no_of_repetitions_of_each_ko) + '`' + "\n")
myfile.write(" do" + "\n")
myfile.write(' specific_ko="$(echo ${Gene[${i}]} | sed \'s/{//g\' | sed \'s/}//g\' | sed \"s/\'//g\" | sed \'s/\"//g\' | sed \'s/,/-/g\')/${j}"' + "\n")
myfile.write(' mkdir -p ${base_outDir}/${unique_ko_dir_name[${i}]}/diary${j}' + "\n")
myfile.write(' matlab ${options} -r "diary(\'${base_outDir}/${unique_ko_dir_name[${i}]}/diary${j}/diary.out\');addpath(\'${master}\');setWarnings();setPath();runSimulation(\'runner\',\'koRunner\',\'logToDisk\',true,\'outDir\',\'${base_outDir}/${unique_ko_dir_name[${i}]}/${j}\',\'jobNumber\',$((no_of_repetitions_of_each_ko*no_of_unique_kos_per_array_job*(${SLURM_ARRAY_TASK_ID}-1)+no_of_unique_kos_per_array_job*(${i}-1)+${j})),\'koList\',{{${Gene[${i}]}}});diary off;exit;" &' + "\n")
myfile.write(" done" + "\n")
myfile.write("done" + "\n")
myfile.write("wait" + "\n")
myfile.write("\n")
myfile.write("end=`date +%s`" + "\n")
myfile.write("runtime=$((end-start))" + "\n")
myfile.write('echo "$((${no_of_unique_kos_per_array_job}*${no_of_repetitions_of_each_ko})) simulations took: ${runtime} seconds."')
# give the file execute permissions
subprocess.check_call(["chmod", "700", str(output_filename)])
return output_dict
def getJobIdFromSubStdOut(self, stdout):
return int(re.search(r'\d+', stdout).group())
class C3ddbWithOutScratch(Connection):
def __init__(self, cluster_user_name, ssh_config_alias, path_to_key, forename_of_user, surname_of_user, user_email, base_output_path = '/home/ochalkley/wc/mg/output', base_runfiles_path = '/home/ochalkley/WholeCell/github/wc/mg/mit/runFiles', wholecell_model_master_dir = '/home/ochalkley/WholeCell/github/wc/mg/WholeCell-master'):
Connection.__init__(self, cluster_user_name, ssh_config_alias, path_to_key, forename_of_user, surname_of_user, user_email)
self.submit_command = 'sbatch'
self.information_about_cluster = 'Commonwealth Computational Cloud for Data Driven Biology, Massachusetts Green High Performance Computer Center'
self.base_output_path = base_output_path
self.base_runfiles_path = base_runfiles_path
self.wholecell_model_master_dir = wholecell_model_master_dir
self.activate_venv_list = ['source activate wholecell_modelling_suite_py36']
# create Bc3 connection to read/write with databases on flex1
from connections import Bc3
bc3_conn = Bc3('oc13378', 'bc3', '/users/oc13378/.ssh/uob/uob-rsa', 'Oliver', 'Chalkley', '<EMAIL>')
self.db_connection = bc3_conn
self.path_to_database_dir = '/home/ochalkley/WholeCell/github/wholecell_modelling_suite'
	#instance methods
# def rsyncFile(self, source, destination, rsync_flags = "-aP"):
# super(bc3, self).rsyncFile(source, destination, rsync_flags)
#
# return
def convertGeneCodeToId(self, tuple_of_gene_codes, path_to_staticDb_stuff='/panfs/panasas01/bluegem-flex1/database/staticDB'):
"""Takes a tuple of genes code and returns a tuple of corresponding gene IDs."""
if type(tuple_of_gene_codes) is not tuple:
			raise TypeError("Gene codes must be a tuple (even if there is only one, e.g. single_tuple = ('MG_001',)); here type(tuple_of_gene_codes)=", type(tuple_of_gene_codes))
add_anoconda_module = 'module add languages/python-anaconda-4.2-3.5'
activate_virtual_environment = 'source activate wholecell_modelling_suite'
change_to_lib_dir = 'cd ' + path_to_staticDb_stuff
get_gene_id = 'python -c "from staticDB import io as sio;static_db_conn = sio();print(static_db_conn.CodeToId(' + str(tuple_of_gene_codes) + '))"'
cmd = "ssh " + self.db_connection.ssh_config_alias + ";" + add_anoconda_module + ";" + activate_virtual_environment + ";" + change_to_lib_dir + ";" + get_gene_id
cmd_list = ["ssh", self.db_connection.ssh_config_alias, add_anoconda_module + ";" + activate_virtual_environment + ";" + change_to_lib_dir + ";" + get_gene_id]
raw_out = self.db_connection.getOutput(cmd_list)
# send command and get output
output = raw_out
output[1] = eval(str(output[1], "utf-8").rstrip())
# it doesn't output the answer in the order you input it so we need to make a dictionary
codeToId_dict = {}
for out in output[1]:
codeToId_dict[out[1]] = out[0]
return codeToId_dict
def checkQueue(self, job_number):
"""This function takes a job number and returns a list of all the array numbers of that job still running."""
grep_part_of_cmd = "squeue -ru " + self.user_name + " | grep \'" + str(job_number) + "\' | awk \'{print $1}\' | awk -F \"_\" \'{print $2}\'"
# cmd = ["ssh", self.ssh_config_alias, grep_part_of_cmd]
output_dict = self.checkSuccess(self.sendCommand, [grep_part_of_cmd])
return output_dict
def checkDiskUsage(self):
"""This function returns disk usage details."""
# create all the post connection commands needed
get_disk_usage_units_command = "pan_quota | awk \'{print $1}\' | tail -n 2 | head -n 1 | sed \'s/[<>]//g\'"
get_disk_usage_command = "pan_quota | awk \'{print $1}\' | tail -n 1"
get_disk_usage_soft_limit_command = "pan_quota | awk \'{print $2}\' | tail -n 1"
get_disk_usage_hard_limit_command = "pan_quota | awk \'{print $3}\' | tail -n 1"
		# combine the connection command with the post connection commands in a list (as is recommended).
units_cmd = ["ssh", self.ssh_config_alias, get_disk_usage_units_command]
usage_cmd = ["ssh", self.ssh_config_alias, get_disk_usage_command]
soft_limit_cmd = ["ssh", self.ssh_config_alias, get_disk_usage_soft_limit_command]
hard_limit_cmd = ["ssh", self.ssh_config_alias, get_disk_usage_hard_limit_command]
# send the commands and save the exit codes and outputs
units = Connection.getOutput(units_cmd)
usage = Connection.getOutput(usage_cmd)
soft_limit = Connection.getOutput(soft_limit_cmd)
hard_limit = Connection.getOutput(hard_limit_cmd)
		# convert string outputs to floats where necessary
units[1] = str(units[1], "utf-8").rstrip()
usage[1] = float(usage[1])
soft_limit[1] = float(soft_limit[1])
hard_limit[1] = float(hard_limit[1])
# print some stats
print(100 * (usage[1] / (1.0 * hard_limit[1]) ),"% of total disk space used.\n\n",hard_limit[1] - usage[1]," ",units[1]," left until hard limit.\n\n",soft_limit[1] - usage[1]," ",units[1]," left unit soft limit.", sep='')
return usage, soft_limit, hard_limit, units
def createStandardKoSubmissionScript(self, output_filename, slurm_job_name, no_of_unique_kos, path_and_name_of_unique_ko_dir_names, no_of_repetitions_of_each_ko, wholecell_model_master_dir, output_dir, path_and_name_of_ko_codes, outfile_name_and_path, errorfile_name_and_path):
# this shouldn't change but gonna leave it there just in case
queue_name = 'defq'
		# set job array numbers to None so that we can check stuff has worked later
job_array_numbers = None
		# The maximum job array size on this cluster
max_job_array_size = 200
# initialise output dict
output_dict = {}
		# test that a reasonable number of jobs has been submitted (this is not a hard and fast rule but there has to be a max, and my intuition suggests that things will start to get complicated around this level, i.e. queueing and hard disk space etc.)
total_sims = no_of_unique_kos * no_of_repetitions_of_each_ko
if total_sims > 20000:
raise ValueError('Total amount of simulations for one batch submission must be less than 20,000, here total_sims=',total_sims)
output_dict['total_sims'] = total_sims
# spread simulations across array jobs
if no_of_unique_kos <= max_job_array_size:
no_of_unique_kos_per_array_job = 1
no_of_arrays = no_of_unique_kos
job_array_numbers = '1-' + str(no_of_unique_kos)
walltime = '0-30:00:00'
else:
# job_array_size * no_of_unique_kos_per_array_job = no_of_unique_kos so all the factors of no_of_unique_kos is
common_factors = [x for x in range(1, no_of_unique_kos+1) if no_of_unique_kos % x == 0]
# make the job_array_size as large as possible such that it is less than max_job_array_size
factor_idx = len(common_factors) - 1
while factor_idx >= 0:
if common_factors[factor_idx] < max_job_array_size:
job_array_numbers = '1-' + str(common_factors[factor_idx])
no_of_arrays = common_factors[factor_idx]
no_of_unique_kos_per_array_job = common_factors[(len(common_factors)-1) - factor_idx]
factor_idx = -1
else:
factor_idx -= 1
# raise error if no suitable factors found!
if job_array_numbers is None:
raise ValueError('job_array_numbers should have been assigned by now! This suggests that it wasn\'t possible for my algorithm to split the KOs across the job array properly. Here no_of_unique_kos=', no_of_unique_kos, ' and the common factors of this number are:', common_factors)
            # add some time to the walltime because I don't think the jobs have to start at the same time
walltime = '0-35:00:00'
output_dict['no_of_arrays'] = no_of_arrays
output_dict['no_of_unique_kos_per_array_job'] = no_of_unique_kos_per_array_job
output_dict['no_of_repetitions_of_each_ko'] = no_of_repetitions_of_each_ko
# calculate the amount of cores per array job - NOTE: for simplification we only use cores and not nodes (this is generally the fastest way to get through the queue anyway)
no_of_cores = no_of_repetitions_of_each_ko * no_of_unique_kos_per_array_job
output_dict['no_of_sims_per_array_job'] = no_of_cores
output_dict['list_of_rep_dir_names'] = list(range(1, no_of_repetitions_of_each_ko + 1))
no_of_nodes = 1
# write the script to file
with open(output_filename, mode='wt', encoding='utf-8') as myfile:
myfile.write("#!/bin/bash" + "\n")
myfile.write("\n")
myfile.write("# This script was automatically created by <NAME>'s whole-cell modelling suite and is based on scripts that he created." + "\n")
myfile.write("\n")
myfile.write("## Job name" + "\n")
myfile.write("#SBATCH --job-name=" + slurm_job_name + "\n")
myfile.write("\n")
myfile.write("## Resource request" + "\n")
# myfile.write("#SBATCH -n " + str(no_of_nodes) + "\n")
myfile.write("#SBATCH --ntasks=" + str(no_of_cores) + " # No. of cores\n")
myfile.write("#SBATCH --time=" + walltime + "\n")
myfile.write("#SBATCH -p " + queue_name + "\n")
myfile.write("#SBATCH --mem-per-cpu=10000" + "\n")
myfile.write("\n")
myfile.write("## Job array request" + "\n")
myfile.write("#SBATCH --array=" + job_array_numbers + "\n")
myfile.write("\n")
myfile.write("## designate output and error files" + "\n")
myfile.write("#SBATCH --output=" + outfile_name_and_path + "_%A_%a.out" + "\n")
myfile.write("#SBATCH --error=" + errorfile_name_and_path + "_%A_%a.err" + "\n")
myfile.write("\n")
myfile.write("# print some details about the job" + "\n")
myfile.write('echo "The Array TASK ID is: ${SLURM_ARRAY_TASK_ID}"' + "\n")
myfile.write('echo "The Array JOB ID is: ${SLURM_ARRAY_JOB_ID}"' + "\n")
myfile.write('echo Running on host `hostname`' + "\n")
myfile.write('echo Time is `date`' + "\n")
myfile.write('echo Directory is `pwd`' + "\n")
myfile.write("\n")
myfile.write("# load required modules" + "\n")
myfile.write("module load mit/matlab/2013a" + "\n")
myfile.write('echo "Modules loaded:"' + "\n")
myfile.write("module list" + "\n")
myfile.write("\n")
myfile.write("# create the master directory variable" + "\n")
myfile.write("master=" + wholecell_model_master_dir + "\n")
myfile.write("\n")
myfile.write("# create output directory" + "\n")
myfile.write("base_outDir=" + output_dir + "\n")
myfile.write("\n")
myfile.write("# collect the KO combos" + "\n")
myfile.write("ko_list=" + path_and_name_of_ko_codes + "\n")
myfile.write("ko_dir_names=" + path_and_name_of_unique_ko_dir_names + "\n")
myfile.write("\n")
myfile.write("# Get all the gene KOs and output folder names" + "\n")
myfile.write('for i in `seq 1 ' + str(no_of_unique_kos_per_array_job) + '`' + "\n")
myfile.write('do' + "\n")
myfile.write(' Gene[${i}]=$(awk NR==$((' + str(no_of_unique_kos_per_array_job) + '*(${SLURM_ARRAY_TASK_ID}-1)+${i})) ${ko_list})' + "\n")
myfile.write(' unique_ko_dir_name[${i}]=$(awk NR==$((' + str(no_of_unique_kos_per_array_job) + '*(${SLURM_ARRAY_TASK_ID}-1)+${i})) ${ko_dir_names})' + "\n")
myfile.write("done" + "\n")
myfile.write("\n")
myfile.write("# go to master directory" + "\n")
myfile.write("cd ${master}" + "\n")
myfile.write("\n")
myfile.write("# NB have limited MATLAB to a single thread" + "\n")
myfile.write('options="-nodesktop -noFigureWindows -nosplash -singleCompThread"' + "\n")
myfile.write("\n")
myfile.write("# run " + str(no_of_unique_kos_per_array_job * no_of_repetitions_of_each_ko) + " simulations in parallel")
myfile.write('echo "Running simulations (single threaded) in parallel - let\'s start the timer!"' + "\n")
myfile.write('start=`date +%s`' + "\n")
myfile.write("\n")
myfile.write("# create all the directories for the diarys (the normal output will be all mixed up cause it's in parrallel!)" + "\n")
myfile.write('for i in `seq 1 ' + str(no_of_unique_kos_per_array_job) + '`' + "\n")
myfile.write("do" + "\n")
myfile.write(' for j in `seq 1 ' + str(no_of_repetitions_of_each_ko) + '`' + "\n")
myfile.write(" do" + "\n")
            myfile.write('        specific_ko="$(echo ${Gene[${i}]} | sed \'s/{//g\' | sed \'s/}//g\' | sed \"s/\'//g\" | sed \'s/\"//g\' | sed \'s/,/-/g\')/${j}"' + "\n")
            myfile.write('        mkdir -p ${base_outDir}/${unique_ko_dir_name[${i}]}/diary${j}' + "\n")
            myfile.write('        matlab ${options} -r "diary(\'${base_outDir}/${unique_ko_dir_name[${i}]}/diary${j}/diary.out\');addpath(\'${master}\');setWarnings();setPath();runSimulation(\'runner\',\'koRunner\',\'logToDisk\',true,\'outDir\',\'${base_outDir}/${unique_ko_dir_name[${i}]}/${j}\',\'jobNumber\',$((' + str(no_of_repetitions_of_each_ko * no_of_unique_kos_per_array_job) + '*(${SLURM_ARRAY_TASK_ID}-1)+' + str(no_of_unique_kos_per_array_job) + '*(${i}-1)+${j})),\'koList\',{{${Gene[${i}]}}});diary off;exit;" &' + "\n")
myfile.write(" done" + "\n")
myfile.write("done" + "\n")
myfile.write("wait" + "\n")
myfile.write("\n")
myfile.write("end=`date +%s`" + "\n")
myfile.write("runtime=$((end-start))" + "\n")
myfile.write('echo "$((${no_of_unique_kos_per_array_job}*${no_of_repetitions_of_each_ko})) simulations took: ${runtime} seconds."')
# give the file execute permissions
subprocess.check_call(["chmod", "700", str(output_filename)])
return output_dict
def getJobIdFromSubStdOut(self, stdout):
return int(re.search(r'\d+', stdout).group())
| [
"base_connection.Connection.getOutput",
"datetime.datetime.now",
"connections.Bc3",
"base_connection.Connection.__init__",
"re.search"
] | [((503, 629), 'base_connection.Connection.__init__', 'Connection.__init__', (['self', 'cluster_user_name', 'ssh_config_alias', 'path_to_key', 'forename_of_user', 'surname_of_user', 'user_email'], {}), '(self, cluster_user_name, ssh_config_alias, path_to_key,\n forename_of_user, surname_of_user, user_email)\n', (522, 629), False, 'from base_connection import Connection\n'), ((2854, 2884), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['cmd_list'], {}), '(cmd_list)\n', (2874, 2884), False, 'from base_connection import Connection\n'), ((3768, 3798), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['cmd_list'], {}), '(cmd_list)\n', (3788, 3798), False, 'from base_connection import Connection\n'), ((4898, 4928), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['cmd_list'], {}), '(cmd_list)\n', (4918, 4928), False, 'from base_connection import Connection\n'), ((6689, 6720), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['units_cmd'], {}), '(units_cmd)\n', (6709, 6720), False, 'from base_connection import Connection\n'), ((6731, 6762), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['usage_cmd'], {}), '(usage_cmd)\n', (6751, 6762), False, 'from base_connection import Connection\n'), ((6778, 6814), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['soft_limit_cmd'], {}), '(soft_limit_cmd)\n', (6798, 6814), False, 'from base_connection import Connection\n'), ((6830, 6866), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['hard_limit_cmd'], {}), '(hard_limit_cmd)\n', (6850, 6866), False, 'from base_connection import Connection\n'), ((16323, 16449), 'base_connection.Connection.__init__', 'Connection.__init__', (['self', 'cluster_user_name', 'ssh_config_alias', 'path_to_key', 'forename_of_user', 'surname_of_user', 'user_email'], {}), '(self, cluster_user_name, ssh_config_alias, path_to_key,\n forename_of_user, surname_of_user, user_email)\n', (16342, 16449), False, 'from base_connection import Connection\n'), ((18157, 18187), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['cmd_list'], {}), '(cmd_list)\n', (18177, 18187), False, 'from base_connection import Connection\n'), ((19889, 19920), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['units_cmd'], {}), '(units_cmd)\n', (19909, 19920), False, 'from base_connection import Connection\n'), ((19931, 19962), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['usage_cmd'], {}), '(usage_cmd)\n', (19951, 19962), False, 'from base_connection import Connection\n'), ((19978, 20014), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['soft_limit_cmd'], {}), '(soft_limit_cmd)\n', (19998, 20014), False, 'from base_connection import Connection\n'), ((20030, 20066), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['hard_limit_cmd'], {}), '(hard_limit_cmd)\n', (20050, 20066), False, 'from base_connection import Connection\n'), ((29270, 29396), 'base_connection.Connection.__init__', 'Connection.__init__', (['self', 'cluster_user_name', 'ssh_config_alias', 'path_to_key', 'forename_of_user', 'surname_of_user', 'user_email'], {}), '(self, cluster_user_name, ssh_config_alias, path_to_key,\n forename_of_user, surname_of_user, user_email)\n', (29289, 29396), False, 'from base_connection import Connection\n'), ((29913, 30006), 'connections.Bc3', 'Bc3', (['"""oc13378"""', '"""bc3"""', '"""/users/oc13378/.ssh/uob/uob-rsa"""', '"""Oliver"""', '"""Chalkley"""', 
'"""<EMAIL>"""'], {}), "('oc13378', 'bc3', '/users/oc13378/.ssh/uob/uob-rsa', 'Oliver',\n 'Chalkley', '<EMAIL>')\n", (29916, 30006), False, 'from connections import Bc3\n'), ((33137, 33168), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['units_cmd'], {}), '(units_cmd)\n', (33157, 33168), False, 'from base_connection import Connection\n'), ((33179, 33210), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['usage_cmd'], {}), '(usage_cmd)\n', (33199, 33210), False, 'from base_connection import Connection\n'), ((33226, 33262), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['soft_limit_cmd'], {}), '(soft_limit_cmd)\n', (33246, 33262), False, 'from base_connection import Connection\n'), ((33278, 33314), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['hard_limit_cmd'], {}), '(hard_limit_cmd)\n', (33298, 33314), False, 'from base_connection import Connection\n'), ((42428, 42554), 'base_connection.Connection.__init__', 'Connection.__init__', (['self', 'cluster_user_name', 'ssh_config_alias', 'path_to_key', 'forename_of_user', 'surname_of_user', 'user_email'], {}), '(self, cluster_user_name, ssh_config_alias, path_to_key,\n forename_of_user, surname_of_user, user_email)\n', (42447, 42554), False, 'from base_connection import Connection\n'), ((43071, 43164), 'connections.Bc3', 'Bc3', (['"""oc13378"""', '"""bc3"""', '"""/users/oc13378/.ssh/uob/uob-rsa"""', '"""Oliver"""', '"""Chalkley"""', '"""<EMAIL>"""'], {}), "('oc13378', 'bc3', '/users/oc13378/.ssh/uob/uob-rsa', 'Oliver',\n 'Chalkley', '<EMAIL>')\n", (43074, 43164), False, 'from connections import Bc3\n'), ((46295, 46326), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['units_cmd'], {}), '(units_cmd)\n', (46315, 46326), False, 'from base_connection import Connection\n'), ((46337, 46368), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['usage_cmd'], {}), '(usage_cmd)\n', (46357, 46368), False, 'from base_connection import Connection\n'), ((46384, 46420), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['soft_limit_cmd'], {}), '(soft_limit_cmd)\n', (46404, 46420), False, 'from base_connection import Connection\n'), ((46436, 46472), 'base_connection.Connection.getOutput', 'Connection.getOutput', (['hard_limit_cmd'], {}), '(hard_limit_cmd)\n', (46456, 46472), False, 'from base_connection import Connection\n'), ((15918, 15943), 're.search', 're.search', (['"""\\\\d+"""', 'stdout'], {}), "('\\\\d+', stdout)\n", (15927, 15943), False, 'import re\n'), ((28865, 28890), 're.search', 're.search', (['"""\\\\d+"""', 'stdout'], {}), "('\\\\d+', stdout)\n", (28874, 28890), False, 'import re\n'), ((42018, 42043), 're.search', 're.search', (['"""\\\\d+"""', 'stdout'], {}), "('\\\\d+', stdout)\n", (42027, 42043), False, 'import re\n'), ((55176, 55201), 're.search', 're.search', (['"""\\\\d+"""', 'stdout'], {}), "('\\\\d+', stdout)\n", (55185, 55201), False, 'import re\n'), ((11096, 11119), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11117, 11119), False, 'import datetime\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script for calculating compatible binhosts.
Generates a file that sets the specified board's binhosts to include all of the
other compatible boards in this buildroot.
"""
from __future__ import print_function
import collections
import glob
import os
import sys
from chromite.lib import commandline
from chromite.lib import portage_util
def FindCandidateBoards():
"""Find candidate local boards to grab prebuilts from."""
portageq_prefix = '/usr/local/bin/portageq-'
for path in sorted(glob.glob('%s*' % portageq_prefix)):
# Strip off the portageq prefix, leaving only the board.
yield path.replace(portageq_prefix, '')
def SummarizeCompatibility(board):
"""Returns a string that will be the same for compatible boards."""
result = portage_util.PortageqEnvvars(['ARCH', 'CFLAGS'], board=board)
return '%s %s' % (result['ARCH'], result['CFLAGS'])
def GenerateBinhostLine(build_root, compatible_boards):
"""Generate a binhost line pulling binaries from the specified boards."""
# TODO(davidjames): Prioritize binhosts with more matching use flags.
local_binhosts = ' '.join([
'file://localhost' + os.path.join(build_root, x, 'packages')
for x in sorted(compatible_boards)])
return "LOCAL_BINHOST='%s'" % local_binhosts
def GetParser():
"""Return a command line parser."""
parser = commandline.ArgumentParser(description=__doc__)
parser.add_argument('--build_root', default='/build',
help='Location of boards (normally %(default)s)')
parser.add_argument('--board', required=True,
help='Board name')
return parser
def main(argv):
parser = GetParser()
flags = parser.parse_args(argv)
by_compatibility = collections.defaultdict(set)
compatible_boards = None
for other_board in FindCandidateBoards():
compat_id = SummarizeCompatibility(other_board)
if other_board == flags.board:
compatible_boards = by_compatibility[compat_id]
else:
by_compatibility[compat_id].add(other_board)
if compatible_boards is None:
print('Missing portageq wrapper for %s' % flags.board, file=sys.stderr)
sys.exit(1)
print('# Generated by cros_generate_local_binhosts.')
print(GenerateBinhostLine(flags.build_root, compatible_boards))
| [
"chromite.lib.commandline.ArgumentParser",
"os.path.join",
"chromite.lib.portage_util.PortageqEnvvars",
"collections.defaultdict",
"sys.exit",
"glob.glob"
] | [((954, 1015), 'chromite.lib.portage_util.PortageqEnvvars', 'portage_util.PortageqEnvvars', (["['ARCH', 'CFLAGS']"], {'board': 'board'}), "(['ARCH', 'CFLAGS'], board=board)\n", (982, 1015), False, 'from chromite.lib import portage_util\n'), ((1531, 1578), 'chromite.lib.commandline.ArgumentParser', 'commandline.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (1557, 1578), False, 'from chromite.lib import commandline\n'), ((1909, 1937), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (1932, 1937), False, 'import collections\n'), ((694, 728), 'glob.glob', 'glob.glob', (["('%s*' % portageq_prefix)"], {}), "('%s*' % portageq_prefix)\n", (703, 728), False, 'import glob\n'), ((2324, 2335), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2332, 2335), False, 'import sys\n'), ((1333, 1372), 'os.path.join', 'os.path.join', (['build_root', 'x', '"""packages"""'], {}), "(build_root, x, 'packages')\n", (1345, 1372), False, 'import os\n')] |
import numpy as np
from scipy.ndimage import gaussian_filter
from operator import itemgetter
from utils import *
import os
import time
def setup_header(hdr_arr):
k = ['CAL_FLAT','CAL_USH','SIGM_USH',
'CAL_PRE','CAL_GHST','CAL_REAL',
'CAL_CRT0','CAL_CRT1','CAL_CRT2','CAL_CRT3','CAL_CRT4','CAL_CRT5',
'CAL_CRT6','CAL_CRT7','CAL_CRT8','CAL_CRT9',
'CAL_NORM','CAL_FRIN','CAL_PSF','CAL_IPOL',
'CAL_SCIP','RTE_MOD','RTE_SW','RTE_ITER']
v = [0,' ',' ',
' ','None ','NA',
0,0,0,0,0,0,
0,0,0,0,
' ','NA','NA',' ',
'None',' ',' ',4294967295]
c = ['Onboard calibrated for gain table','Unsharp masking correction','Sigma for unsharp masking [px]',
         'Prefilter correction (DID/file)','Ghost correction (name + version of module)','Prealignment of images before demodulation',
'cross-talk from I to Q (slope)','cross-talk from I to Q (offset)','cross-talk from I to U (slope)','cross-talk from I to U (offset)','cross-talk from I to V (slope)','cross-talk from I to V (offset)',
'cross-talk from V to Q (slope)','cross-talk from V to Q (offset)','cross-talk from V to U (slope)','cross-talk from V to U (offset)',
'Normalization (normalization constant PROC_Ic)','Fringe correction (name + version of module)','Onboard calibrated for instrumental PSF','Onboard calibrated for instrumental polarization',
'Onboard scientific data analysis','Inversion mode','Inversion software','Number RTE inversion iterations']
for h in hdr_arr:
for i in range(len(k)):
if k[i] in h: # Check for existence
h[k[i]] = v[i]
else:
if i==0:
h.set(k[i], v[i], c[i], after='CAL_DARK')
else:
h.set(k[i], v[i], c[i], after=k[i-1])
return hdr_arr
def load_flat(flat_f, accum_scaling, bit_conversion, scale_data, header_imgdirx_exists, imgdirx_flipped, cpos_arr) -> np.ndarray:
"""
load, scale, flip and correct flat
"""
print(" ")
printc('-->>>>>>> Reading Flats',color=bcolors.OKGREEN)
start_time = time.time()
# flat from IP-5
if '0024151020000' in flat_f or '0024150020000' in flat_f:
flat, header_flat = get_data(flat_f, scaling = accum_scaling, bit_convert_scale=bit_conversion,
scale_data=False)
else:
flat, header_flat = get_data(flat_f, scaling = accum_scaling, bit_convert_scale=bit_conversion,
scale_data=scale_data)
if 'IMGDIRX' in header_flat:
header_fltdirx_exists = True
fltdirx_flipped = str(header_flat['IMGDIRX'])
else:
header_fltdirx_exists = False
fltdirx_flipped = 'NO'
print(f"Flat field shape is {flat.shape}")
# correction based on science data - see if flat and science are both flipped or not
flat = compare_IMGDIRX(flat,header_imgdirx_exists,imgdirx_flipped,header_fltdirx_exists,fltdirx_flipped)
flat = np.moveaxis(flat, 0,-1) #so that it is [y,x,24]
flat = flat.reshape(2048,2048,6,4) #separate 24 images, into 6 wavelengths, with each 4 pol states
flat = np.moveaxis(flat, 2,-1)
print(flat.shape)
_, _, _, cpos_f = fits_get_sampling(flat_f,verbose = True) #get flat continuum position
print(f"The continuum position of the flat field is at {cpos_f} index position")
#--------
# test if the science and flat have continuum at same position
#--------
flat = compare_cpos(flat,cpos_f,cpos_arr[0])
flat_pmp_temp = str(header_flat['HPMPTSP1'])
print(f"Flat PMP Temperature Set Point: {flat_pmp_temp}")
#--------
# correct for missing line in particular flat field
#--------
if flat_f[-15:] == '0162201100.fits': # flat_f[-62:] == 'solo_L0_phi-hrt-flat_0667134081_V202103221851C_0162201100.fits'
print("This flat has a missing line - filling in with neighbouring pixels")
flat_copy = flat.copy()
flat[:,:,1,1] = filling_data(flat_copy[:,:,1,1], 0, mode = {'exact rows':[1345,1346]}, axis=1)
del flat_copy
printc('--------------------------------------------------------------',bcolors.OKGREEN)
printc(f"------------ Load flats time: {np.round(time.time() - start_time,3)} seconds",bcolors.OKGREEN)
printc('--------------------------------------------------------------',bcolors.OKGREEN)
return flat
def load_dark(dark_f) -> np.ndarray:
"""
loads dark field from given path
"""
print(" ")
printc('-->>>>>>> Reading Darks',color=bcolors.OKGREEN)
start_time = time.time()
try:
dark,_ = get_data(dark_f)
dark_shape = dark.shape
if dark_shape != (2048,2048):
if dark.ndim > 2:
printc("Dark Field Input File has more dimensions than the expected 2048,2048 format: {}",dark_f,color=bcolors.WARNING)
raise ValueError
printc("Dark Field Input File not in 2048,2048 format: {}",dark_f,color=bcolors.WARNING)
printc("Attempting to correct ",color=bcolors.WARNING)
try:
if dark_shape[0] > 2048:
dark = dark[dark_shape[0]-2048:,:]
except Exception:
printc("ERROR, Unable to correct shape of dark field data: {}",dark_f,color=bcolors.FAIL)
raise ValueError
printc('--------------------------------------------------------------',bcolors.OKGREEN)
printc(f"------------ Load darks time: {np.round(time.time() - start_time,3)} seconds",bcolors.OKGREEN)
printc('--------------------------------------------------------------',bcolors.OKGREEN)
return dark
except Exception:
printc("ERROR, Unable to open and process darks file: {}",dark_f,color=bcolors.FAIL)
def apply_dark_correction(data, flat, dark, rows, cols) -> np.ndarray:
"""
subtracts dark field from flat field and science data
"""
print(" ")
print("-->>>>>>> Subtracting dark field")
start_time = time.time()
data -= dark[rows,cols, np.newaxis, np.newaxis, np.newaxis]
#flat -= dark[..., np.newaxis, np.newaxis] - # all processed flat fields should already be dark corrected
printc('--------------------------------------------------------------',bcolors.OKGREEN)
printc(f"------------- Dark Field correction time: {np.round(time.time() - start_time,3)} seconds",bcolors.OKGREEN)
printc('--------------------------------------------------------------',bcolors.OKGREEN)
return data, flat
def normalise_flat(flat, flat_f, ceny, cenx) -> np.ndarray:
"""
normalise flat fields at each wavelength position to remove the spectral line
"""
print(" ")
printc('-->>>>>>> Normalising Flats',color=bcolors.OKGREEN)
start_time = time.time()
try:
norm_fac = np.mean(flat[ceny,cenx, :, :], axis = (0,1))[np.newaxis, np.newaxis, ...] #mean of the central 1k x 1k
flat /= norm_fac
printc('--------------------------------------------------------------',bcolors.OKGREEN)
printc(f"------------- Normalising flat time: {np.round(time.time() - start_time,3)} seconds",bcolors.OKGREEN)
printc('--------------------------------------------------------------',bcolors.OKGREEN)
return flat
except Exception:
printc("ERROR, Unable to normalise the flat fields: {}",flat_f,color=bcolors.FAIL)
def demod_hrt(data,pmp_temp, verbose = True) -> np.ndarray:
'''
Use constant demodulation matrices to demodulate input data
'''
if pmp_temp == '50':
demod_data = np.array([[ 0.28037298, 0.18741922, 0.25307596, 0.28119895],
[ 0.40408596, 0.10412157, -0.7225681, 0.20825675],
[-0.19126636, -0.5348939, 0.08181918, 0.64422774],
[-0.56897295, 0.58620095, -0.2579202, 0.2414017 ]])
elif pmp_temp == '40':
demod_data = np.array([[ 0.26450154, 0.2839626, 0.12642948, 0.3216773 ],
[ 0.59873885, 0.11278069, -0.74991184, 0.03091451],
[ 0.10833212, -0.5317737, -0.1677862, 0.5923593 ],
[-0.46916953, 0.47738808, -0.43824592, 0.42579797]])
else:
printc("Demodulation Matrix for PMP TEMP of {pmp_temp} deg is not available", color = bcolors.FAIL)
if verbose:
printc(f'Using a constant demodulation matrix for a PMP TEMP of {pmp_temp} deg',color = bcolors.OKGREEN)
demod_data = demod_data.reshape((4,4))
shape = data.shape
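    # replicate the 4x4 demodulation matrix across the spatial grid so it can be matrix-multiplied with every pixel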
demod = np.tile(demod_data, (shape[0],shape[1],1,1))
if data.ndim == 5:
#if data array has more than one scan
data = np.moveaxis(data,-1,0) #moving number of scans to first dimension
data = np.matmul(demod,data)
data = np.moveaxis(data,0,-1) #move scans back to the end
elif data.ndim == 4:
#for if data has just one scan
data = np.matmul(demod,data)
return data, demod
def unsharp_masking(flat,sigma,flat_pmp_temp,cpos_arr,clean_mode,clean_f,pol_end=4,verbose=True):
"""
    unsharp masks the flat fields to blur out polarimetric structures due to solar rotation
clean_f = ['blurring', 'fft']
"""
flat_demod, demodM = demod_hrt(flat, flat_pmp_temp,verbose)
norm_factor = np.mean(flat_demod[512:1536,512:1536,0,cpos_arr[0]])
flat_demod /= norm_factor
new_demod_flats = np.copy(flat_demod)
# b_arr = np.zeros((2048,2048,3,5))
if cpos_arr[0] == 0:
wv_range = range(1,6)
elif cpos_arr[0] == 5:
wv_range = range(5)
if clean_mode == "QUV":
start_clean_pol = 1
if verbose:
print("Unsharp Masking Q,U,V")
elif clean_mode == "UV":
start_clean_pol = 2
if verbose:
print("Unsharp Masking U,V")
elif clean_mode == "V":
start_clean_pol = 3
if verbose:
print("Unsharp Masking V")
if clean_f == 'blurring':
blur = lambda a: gaussian_filter(a,sigma)
elif clean_f == 'fft':
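        # build a separable 2D Gaussian low-pass filter directly in Fourier space and blur by multiplying with the image's FFT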
x = np.fft.fftfreq(2048,1)
fftgaus2d = np.exp(-2*np.pi**2*(x-0)**2*sigma**2)[:,np.newaxis] * np.exp(-2*np.pi**2*(x-0)**2*sigma**2)[np.newaxis]
blur = lambda a : (np.fft.ifftn(fftgaus2d*np.fft.fftn(a.copy()))).real
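    # for each pol state and non-continuum wavelength: b is the high-frequency residual, so c (the blurred, clipped flat) replaces the original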
for pol in range(start_clean_pol,pol_end):
for wv in wv_range: #not the continuum
a = np.copy(np.clip(flat_demod[:,:,pol,wv], -0.02, 0.02))
b = a - blur(a)
# b_arr[:,:,pol-1,wv-1] = b
c = a - b
new_demod_flats[:,:,pol,wv] = c
invM = np.linalg.inv(demodM)
return np.matmul(invM, new_demod_flats*norm_factor)
def flat_correction(data,flat,flat_states,rows,cols) -> np.ndarray:
"""
correct science data with flat fields
"""
print(" ")
printc('-->>>>>>> Correcting Flatfield',color=bcolors.OKGREEN)
start_time = time.time()
try:
if flat_states == 6:
printc("Dividing by 6 flats, one for each wavelength",color=bcolors.OKGREEN)
tmp = np.mean(flat,axis=-2) #avg over pol states for the wavelength
return data / tmp[rows,cols, np.newaxis, :, np.newaxis]
elif flat_states == 24:
printc("Dividing by 24 flats, one for each image",color=bcolors.OKGREEN)
return data / flat[rows,cols, :, :, np.newaxis] #only one new axis for the scans
elif flat_states == 4:
printc("Dividing by 4 flats, one for each pol state",color=bcolors.OKGREEN)
tmp = np.mean(flat,axis=-1) #avg over wavelength
return data / tmp[rows,cols, :, np.newaxis, np.newaxis]
else:
print(" ")
printc('-->>>>>>> Unable to apply flat correction. Please insert valid flat_states',color=bcolors.WARNING)
printc('--------------------------------------------------------------',bcolors.OKGREEN)
printc(f"------------- Flat Field correction time: {np.round(time.time() - start_time,3)} seconds ",bcolors.OKGREEN)
printc('--------------------------------------------------------------',bcolors.OKGREEN)
return data
except:
printc("ERROR, Unable to apply flat fields",color=bcolors.FAIL)
def prefilter_correction(data,voltagesData_arr,prefilter,prefilter_voltages):
"""
applies prefilter correction
adapted from SPGPylibs
"""
def _get_v1_index1(x):
index1, v1 = min(enumerate([abs(i) for i in x]), key=itemgetter(1))
return v1, index1
data_shape = data.shape
for scan in range(data_shape[-1]):
voltage_list = voltagesData_arr[scan]
for wv in range(6):
v = voltage_list[wv]
vdif = [v - pf for pf in prefilter_voltages]
v1, index1 = _get_v1_index1(vdif)
if vdif[index1] >= 0:
v2 = vdif[index1 + 1]
index2 = index1 + 1
else:
v2 = vdif[index1-1]
index2 = index1 - 1
imprefilter = (prefilter[:,:, index1]*v1 + prefilter[:,:, index2]*v2)/(v1+v2) #interpolation between nearest voltages
data[:,:,:,wv,scan] /= imprefilter[...,np.newaxis]
return data
def apply_field_stop(data, rows, cols, header_imgdirx_exists, imgdirx_flipped) -> np.ndarray:
"""
apply field stop mask to the science data
"""
print(" ")
printc("-->>>>>>> Applying field stop",color=bcolors.OKGREEN)
start_time = time.time()
field_stop_loc = os.path.realpath(__file__)
field_stop_loc = field_stop_loc.split('src/')[0] + 'field_stop/'
field_stop,_ = load_fits(field_stop_loc + 'HRT_field_stop.fits')
field_stop = np.where(field_stop > 0,1,0)
if header_imgdirx_exists:
if imgdirx_flipped == 'YES': #should be YES for any L1 data, but mistake in processing software
field_stop = field_stop[:,::-1] #also need to flip the flat data after dark correction
data *= field_stop[rows,cols,np.newaxis, np.newaxis, np.newaxis]
printc('--------------------------------------------------------------',bcolors.OKGREEN)
printc(f"------------- Field stop time: {np.round(time.time() - start_time,3)} seconds",bcolors.OKGREEN)
printc('--------------------------------------------------------------',bcolors.OKGREEN)
return data, field_stop
def load_ghost_field_stop(header_imgdirx_exists, imgdirx_flipped) -> np.ndarray:
"""
apply field stop ghost mask to the science data
"""
print(" ")
printc("-->>>>>>> Loading ghost field stop",color=bcolors.OKGREEN)
start_time = time.time()
field_stop_loc = os.path.realpath(__file__)
field_stop_loc = field_stop_loc.split('src/')[0] + 'field_stop/'
field_stop_ghost,_ = load_fits(field_stop_loc + 'HRT_field_stop_ghost.fits')
field_stop_ghost = np.where(field_stop_ghost > 0,1,0)
if header_imgdirx_exists:
if imgdirx_flipped == 'YES': #should be YES for any L1 data, but mistake in processing software
field_stop_ghost = field_stop_ghost[:,::-1]
printc('--------------------------------------------------------------',bcolors.OKGREEN)
printc(f"------------- Load Ghost Field Stop time: {np.round(time.time() - start_time,3)} seconds",bcolors.OKGREEN)
printc('--------------------------------------------------------------',bcolors.OKGREEN)
return field_stop_ghost
def crosstalk_auto_ItoQUV(data_demod,cpos,wl,roi=np.ones((2048,2048)),verbose=0,npoints=5000,limit=0.2):
import random, statistics
from scipy.optimize import curve_fit
def linear(x,a,b):
return a*x + b
my = []
sy = []
x = data_demod[roi>0,0,cpos].flatten()
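    # keep only pixels whose continuum Stokes I lies between 'limit' and 1.5 for the linear cross-talk fits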
ids = np.logical_and(x > limit, x < 1.5)
x = x[ids].flatten()
N = x.size
idx = random.sample(range(N),npoints)
mx = x[idx].mean()
sx = x[idx].std()
xp = np.linspace(x.min(), x.max(), 100)
A = np.vstack([x, np.ones(len(x))]).T
# I to Q
yQ = data_demod[roi>0,1,wl].flatten()
yQ = yQ[ids].flatten()
my.append(yQ[idx].mean())
sy.append(yQ[idx].std())
cQ = curve_fit(linear,x,yQ,p0=[0,0])[0]
pQ = np.poly1d(cQ)
# I to U
yU = data_demod[roi>0,2,wl].flatten()
yU = yU[ids].flatten()
my.append(yU[idx].mean())
sy.append(yU[idx].std())
cU = curve_fit(linear,x,yU,p0=[0,0])[0]
pU = np.poly1d(cU)
# I to V
yV = data_demod[roi>0,3,wl].flatten()
yV = yV[ids].flatten()
my.append(yV[idx].mean())
sy.append(yV[idx].std())
cV = curve_fit(linear,x,yV,p0=[0,0])[0]
pV = np.poly1d(cV)
if verbose:
PLT_RNG = 3
fig, ax = plt.subplots(figsize=(8, 8))
ax.scatter(x[idx],yQ[idx],color='red',alpha=0.6,s=10)
ax.plot(xp, pQ(xp), color='red', linestyle='dashed',linewidth=3.0)
ax.scatter(x[idx],yU[idx],color='blue',alpha=0.6,s=10)
ax.plot(xp, pU(xp), color='blue', linestyle='dashed',linewidth=3.0)
ax.scatter(x[idx],yV[idx],color='green',alpha=0.6,s=10)
ax.plot(xp, pV(xp), color='green', linestyle='dashed',linewidth=3.0)
ax.set_xlim([mx - PLT_RNG * sx,mx + PLT_RNG * sx])
ax.set_ylim([min(my) - 1.8*PLT_RNG * statistics.mean(sy),max(my) + PLT_RNG * statistics.mean(sy)])
ax.set_xlabel('Stokes I')
ax.set_ylabel('Stokes Q/U/V')
ax.text(mx - 0.9*PLT_RNG * sx, min(my) - 1.4*PLT_RNG * statistics.mean(sy), 'Cross-talk from I to Q: slope = {: {width}.{prec}f} ; off-set = {: {width}.{prec}f} '.format(cQ[0],cQ[1],width=8,prec=4), style='italic',bbox={'facecolor': 'red', 'alpha': 0.1, 'pad': 1}, fontsize=15)
ax.text(mx - 0.9*PLT_RNG * sx, min(my) - 1.55*PLT_RNG * statistics.mean(sy), 'Cross-talk from I to U: slope = {: {width}.{prec}f} ; off-set = {: {width}.{prec}f} '.format(cU[0],cU[1],width=8,prec=4), style='italic',bbox={'facecolor': 'blue', 'alpha': 0.1, 'pad': 1}, fontsize=15)
ax.text(mx - 0.9*PLT_RNG * sx, min(my) - 1.7*PLT_RNG * statistics.mean(sy), 'Cross-talk from I to V: slope = {: {width}.{prec}f} ; off-set = {: {width}.{prec}f} '.format(cV[0],cV[1],width=8,prec=4), style='italic',bbox={'facecolor': 'green', 'alpha': 0.1, 'pad': 1}, fontsize=15)
# fig.show()
print('Cross-talk from I to Q: slope = {: {width}.{prec}f} ; off-set = {: {width}.{prec}f} '.format(cQ[0],cQ[1],width=8,prec=4))
print('Cross-talk from I to U: slope = {: {width}.{prec}f} ; off-set = {: {width}.{prec}f} '.format(cU[0],cU[1],width=8,prec=4))
print('Cross-talk from I to V: slope = {: {width}.{prec}f} ; off-set = {: {width}.{prec}f} '.format(cV[0],cV[1],width=8,prec=4))
# return cQ,cU,cV, (idx,x,xp,yQ,yU,yV,pQ,pU,pV,mx,sx,my,sy)
else:
printc('Cross-talk from I to Q: slope = {: {width}.{prec}f} ; off-set = {: {width}.{prec}f} '.format(cQ[0],cQ[1],width=8,prec=4),color=bcolors.OKGREEN)
printc('Cross-talk from I to U: slope = {: {width}.{prec}f} ; off-set = {: {width}.{prec}f} '.format(cU[0],cU[1],width=8,prec=4),color=bcolors.OKGREEN)
printc('Cross-talk from I to V: slope = {: {width}.{prec}f} ; off-set = {: {width}.{prec}f} '.format(cV[0],cV[1],width=8,prec=4),color=bcolors.OKGREEN)
ct = np.asarray((cQ,cU,cV)).T
return ct
def CT_ItoQUV(data, ctalk_params, norm_stokes, cpos_arr, Ic_mask):
"""
performs cross talk correction for I -> Q,U,V
"""
before_ctalk_data = np.copy(data)
data_shape = data.shape
# ceny = slice(data_shape[0]//2 - data_shape[0]//4, data_shape[0]//2 + data_shape[0]//4)
# cenx = slice(data_shape[1]//2 - data_shape[1]//4, data_shape[1]//2 + data_shape[1]//4)
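    # per-scan mean continuum intensity over the Ic mask, used to scale the cross-talk coefficients at each wavelength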
cont_stokes = np.ones(data_shape[-1])
for scan in range(data_shape[-1]):
cont_stokes[scan] = np.mean(data[Ic_mask[...,scan],0,cpos_arr[0],scan])
for i in range(6):
# stokes_i_wv_avg = np.mean(data[ceny,cenx,0,i,:], axis = (0,1))
stokes_i_wv_avg = np.ones(data_shape[-1])
for scan in range(data_shape[-1]):
stokes_i_wv_avg[scan] = np.mean(data[Ic_mask[...,scan],0,i,scan])
if norm_stokes:
#if normed, applies normalised offset to normed stokes
tmp_param = ctalk_params*np.divide(stokes_i_wv_avg,cont_stokes)
q_slope = tmp_param[0,0,:]
u_slope = tmp_param[0,1,:]
v_slope = tmp_param[0,2,:]
q_int = tmp_param[1,0,:]
u_int = tmp_param[1,1,:]
v_int = tmp_param[1,2,:]
data[:,:,1,i,:] = before_ctalk_data[:,:,1,i,:] - before_ctalk_data[:,:,0,i,:]*q_slope - q_int
data[:,:,2,i,:] = before_ctalk_data[:,:,2,i,:] - before_ctalk_data[:,:,0,i,:]*u_slope - u_int
data[:,:,3,i,:] = before_ctalk_data[:,:,3,i,:] - before_ctalk_data[:,:,0,i,:]*v_slope - v_int
else:
#if not normed, applies raw offset cross talk correction to raw stokes counts
tmp_param = ctalk_params[0,:,:]*np.divide(stokes_i_wv_avg,cont_stokes)
q_slope = tmp_param[0,:]
u_slope = tmp_param[1,:]
v_slope = tmp_param[2,:]
q_int = ctalk_params[1,0,:]
u_int = ctalk_params[1,1,:]
v_int = ctalk_params[1,2,:]
data[:,:,1,i,:] = before_ctalk_data[:,:,1,i,:] - before_ctalk_data[:,:,0,i,:]*q_slope - q_int*stokes_i_wv_avg
data[:,:,2,i,:] = before_ctalk_data[:,:,2,i,:] - before_ctalk_data[:,:,0,i,:]*u_slope - u_int*stokes_i_wv_avg
data[:,:,3,i,:] = before_ctalk_data[:,:,3,i,:] - before_ctalk_data[:,:,0,i,:]*v_slope - v_int*stokes_i_wv_avg
return data | [
"numpy.clip",
"numpy.array",
"scipy.ndimage.gaussian_filter",
"numpy.moveaxis",
"operator.itemgetter",
"numpy.poly1d",
"numpy.divide",
"numpy.mean",
"numpy.where",
"numpy.asarray",
"numpy.exp",
"numpy.matmul",
"numpy.tile",
"numpy.ones",
"time.time",
"scipy.optimize.curve_fit",
"numpy.copy",
"statistics.mean",
"numpy.logical_and",
"numpy.fft.fftfreq",
"os.path.realpath",
"numpy.linalg.inv"
] | [((2090, 2101), 'time.time', 'time.time', ([], {}), '()\n', (2099, 2101), False, 'import time\n'), ((3010, 3034), 'numpy.moveaxis', 'np.moveaxis', (['flat', '(0)', '(-1)'], {}), '(flat, 0, -1)\n', (3021, 3034), True, 'import numpy as np\n'), ((3172, 3196), 'numpy.moveaxis', 'np.moveaxis', (['flat', '(2)', '(-1)'], {}), '(flat, 2, -1)\n', (3183, 3196), True, 'import numpy as np\n'), ((4625, 4636), 'time.time', 'time.time', ([], {}), '()\n', (4634, 4636), False, 'import time\n'), ((6117, 6128), 'time.time', 'time.time', ([], {}), '()\n', (6126, 6128), False, 'import time\n'), ((6892, 6903), 'time.time', 'time.time', ([], {}), '()\n', (6901, 6903), False, 'import time\n'), ((8679, 8726), 'numpy.tile', 'np.tile', (['demod_data', '(shape[0], shape[1], 1, 1)'], {}), '(demod_data, (shape[0], shape[1], 1, 1))\n', (8686, 8726), True, 'import numpy as np\n'), ((9438, 9493), 'numpy.mean', 'np.mean', (['flat_demod[512:1536, 512:1536, 0, cpos_arr[0]]'], {}), '(flat_demod[512:1536, 512:1536, 0, cpos_arr[0]])\n', (9445, 9493), True, 'import numpy as np\n'), ((9545, 9564), 'numpy.copy', 'np.copy', (['flat_demod'], {}), '(flat_demod)\n', (9552, 9564), True, 'import numpy as np\n'), ((10765, 10786), 'numpy.linalg.inv', 'np.linalg.inv', (['demodM'], {}), '(demodM)\n', (10778, 10786), True, 'import numpy as np\n'), ((10799, 10845), 'numpy.matmul', 'np.matmul', (['invM', '(new_demod_flats * norm_factor)'], {}), '(invM, new_demod_flats * norm_factor)\n', (10808, 10845), True, 'import numpy as np\n'), ((11072, 11083), 'time.time', 'time.time', ([], {}), '()\n', (11081, 11083), False, 'import time\n'), ((13793, 13804), 'time.time', 'time.time', ([], {}), '()\n', (13802, 13804), False, 'import time\n'), ((13827, 13853), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (13843, 13853), False, 'import os\n'), ((14012, 14042), 'numpy.where', 'np.where', (['(field_stop > 0)', '(1)', '(0)'], {}), '(field_stop > 0, 1, 0)\n', (14020, 14042), True, 'import numpy as np\n'), ((14925, 14936), 'time.time', 'time.time', ([], {}), '()\n', (14934, 14936), False, 'import time\n'), ((14959, 14985), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (14975, 14985), False, 'import os\n'), ((15160, 15196), 'numpy.where', 'np.where', (['(field_stop_ghost > 0)', '(1)', '(0)'], {}), '(field_stop_ghost > 0, 1, 0)\n', (15168, 15196), True, 'import numpy as np\n'), ((15772, 15793), 'numpy.ones', 'np.ones', (['(2048, 2048)'], {}), '((2048, 2048))\n', (15779, 15793), True, 'import numpy as np\n'), ((16027, 16061), 'numpy.logical_and', 'np.logical_and', (['(x > limit)', '(x < 1.5)'], {}), '(x > limit, x < 1.5)\n', (16041, 16061), True, 'import numpy as np\n'), ((16474, 16487), 'numpy.poly1d', 'np.poly1d', (['cQ'], {}), '(cQ)\n', (16483, 16487), True, 'import numpy as np\n'), ((16683, 16696), 'numpy.poly1d', 'np.poly1d', (['cU'], {}), '(cU)\n', (16692, 16696), True, 'import numpy as np\n'), ((16892, 16905), 'numpy.poly1d', 'np.poly1d', (['cV'], {}), '(cV)\n', (16901, 16905), True, 'import numpy as np\n'), ((19729, 19742), 'numpy.copy', 'np.copy', (['data'], {}), '(data)\n', (19736, 19742), True, 'import numpy as np\n'), ((19981, 20004), 'numpy.ones', 'np.ones', (['data_shape[-1]'], {}), '(data_shape[-1])\n', (19988, 20004), True, 'import numpy as np\n'), ((7707, 7928), 'numpy.array', 'np.array', (['[[0.28037298, 0.18741922, 0.25307596, 0.28119895], [0.40408596, 0.10412157,\n -0.7225681, 0.20825675], [-0.19126636, -0.5348939, 0.08181918, \n 0.64422774], [-0.56897295, 0.58620095, -0.2579202, 
0.2414017]]'], {}), '([[0.28037298, 0.18741922, 0.25307596, 0.28119895], [0.40408596, \n 0.10412157, -0.7225681, 0.20825675], [-0.19126636, -0.5348939, \n 0.08181918, 0.64422774], [-0.56897295, 0.58620095, -0.2579202, 0.2414017]])\n', (7715, 7928), True, 'import numpy as np\n'), ((8809, 8833), 'numpy.moveaxis', 'np.moveaxis', (['data', '(-1)', '(0)'], {}), '(data, -1, 0)\n', (8820, 8833), True, 'import numpy as np\n'), ((8891, 8913), 'numpy.matmul', 'np.matmul', (['demod', 'data'], {}), '(demod, data)\n', (8900, 8913), True, 'import numpy as np\n'), ((8928, 8952), 'numpy.moveaxis', 'np.moveaxis', (['data', '(0)', '(-1)'], {}), '(data, 0, -1)\n', (8939, 8952), True, 'import numpy as np\n'), ((16430, 16465), 'scipy.optimize.curve_fit', 'curve_fit', (['linear', 'x', 'yQ'], {'p0': '[0, 0]'}), '(linear, x, yQ, p0=[0, 0])\n', (16439, 16465), False, 'from scipy.optimize import curve_fit\n'), ((16639, 16674), 'scipy.optimize.curve_fit', 'curve_fit', (['linear', 'x', 'yU'], {'p0': '[0, 0]'}), '(linear, x, yU, p0=[0, 0])\n', (16648, 16674), False, 'from scipy.optimize import curve_fit\n'), ((16848, 16883), 'scipy.optimize.curve_fit', 'curve_fit', (['linear', 'x', 'yV'], {'p0': '[0, 0]'}), '(linear, x, yV, p0=[0, 0])\n', (16857, 16883), False, 'from scipy.optimize import curve_fit\n'), ((20077, 20132), 'numpy.mean', 'np.mean', (['data[Ic_mask[..., scan], 0, cpos_arr[0], scan]'], {}), '(data[Ic_mask[..., scan], 0, cpos_arr[0], scan])\n', (20084, 20132), True, 'import numpy as np\n'), ((20273, 20296), 'numpy.ones', 'np.ones', (['data_shape[-1]'], {}), '(data_shape[-1])\n', (20280, 20296), True, 'import numpy as np\n'), ((6933, 6977), 'numpy.mean', 'np.mean', (['flat[ceny, cenx, :, :]'], {'axis': '(0, 1)'}), '(flat[ceny, cenx, :, :], axis=(0, 1))\n', (6940, 6977), True, 'import numpy as np\n'), ((8054, 8274), 'numpy.array', 'np.array', (['[[0.26450154, 0.2839626, 0.12642948, 0.3216773], [0.59873885, 0.11278069, -\n 0.74991184, 0.03091451], [0.10833212, -0.5317737, -0.1677862, 0.5923593\n ], [-0.46916953, 0.47738808, -0.43824592, 0.42579797]]'], {}), '([[0.26450154, 0.2839626, 0.12642948, 0.3216773], [0.59873885, \n 0.11278069, -0.74991184, 0.03091451], [0.10833212, -0.5317737, -\n 0.1677862, 0.5923593], [-0.46916953, 0.47738808, -0.43824592, 0.42579797]])\n', (8062, 8274), True, 'import numpy as np\n'), ((9063, 9085), 'numpy.matmul', 'np.matmul', (['demod', 'data'], {}), '(demod, data)\n', (9072, 9085), True, 'import numpy as np\n'), ((10157, 10182), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['a', 'sigma'], {}), '(a, sigma)\n', (10172, 10182), False, 'from scipy.ndimage import gaussian_filter\n'), ((10221, 10244), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['(2048)', '(1)'], {}), '(2048, 1)\n', (10235, 10244), True, 'import numpy as np\n'), ((11261, 11283), 'numpy.mean', 'np.mean', (['flat'], {'axis': '(-2)'}), '(flat, axis=-2)\n', (11268, 11283), True, 'import numpy as np\n'), ((19528, 19552), 'numpy.asarray', 'np.asarray', (['(cQ, cU, cV)'], {}), '((cQ, cU, cV))\n', (19538, 19552), True, 'import numpy as np\n'), ((20376, 20421), 'numpy.mean', 'np.mean', (['data[Ic_mask[..., scan], 0, i, scan]'], {}), '(data[Ic_mask[..., scan], 0, i, scan])\n', (20383, 20421), True, 'import numpy as np\n'), ((10572, 10619), 'numpy.clip', 'np.clip', (['flat_demod[:, :, pol, wv]', '(-0.02)', '(0.02)'], {}), '(flat_demod[:, :, pol, wv], -0.02, 0.02)\n', (10579, 10619), True, 'import numpy as np\n'), ((12715, 12728), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (12725, 12728), False, 'from operator 
import itemgetter\n'), ((20560, 20599), 'numpy.divide', 'np.divide', (['stokes_i_wv_avg', 'cont_stokes'], {}), '(stokes_i_wv_avg, cont_stokes)\n', (20569, 20599), True, 'import numpy as np\n'), ((21312, 21351), 'numpy.divide', 'np.divide', (['stokes_i_wv_avg', 'cont_stokes'], {}), '(stokes_i_wv_avg, cont_stokes)\n', (21321, 21351), True, 'import numpy as np\n'), ((10264, 10315), 'numpy.exp', 'np.exp', (['(-2 * np.pi ** 2 * (x - 0) ** 2 * sigma ** 2)'], {}), '(-2 * np.pi ** 2 * (x - 0) ** 2 * sigma ** 2)\n', (10270, 10315), True, 'import numpy as np\n'), ((10318, 10369), 'numpy.exp', 'np.exp', (['(-2 * np.pi ** 2 * (x - 0) ** 2 * sigma ** 2)'], {}), '(-2 * np.pi ** 2 * (x - 0) ** 2 * sigma ** 2)\n', (10324, 10369), True, 'import numpy as np\n'), ((11762, 11784), 'numpy.mean', 'np.mean', (['flat'], {'axis': '(-1)'}), '(flat, axis=-1)\n', (11769, 11784), True, 'import numpy as np\n'), ((17720, 17739), 'statistics.mean', 'statistics.mean', (['sy'], {}), '(sy)\n', (17735, 17739), False, 'import random, statistics\n'), ((18007, 18026), 'statistics.mean', 'statistics.mean', (['sy'], {}), '(sy)\n', (18022, 18026), False, 'import random, statistics\n'), ((18294, 18313), 'statistics.mean', 'statistics.mean', (['sy'], {}), '(sy)\n', (18309, 18313), False, 'import random, statistics\n'), ((4275, 4286), 'time.time', 'time.time', ([], {}), '()\n', (4284, 4286), False, 'import time\n'), ((6464, 6475), 'time.time', 'time.time', ([], {}), '()\n', (6473, 6475), False, 'import time\n'), ((14493, 14504), 'time.time', 'time.time', ([], {}), '()\n', (14502, 14504), False, 'import time\n'), ((15545, 15556), 'time.time', 'time.time', ([], {}), '()\n', (15554, 15556), False, 'import time\n'), ((17523, 17542), 'statistics.mean', 'statistics.mean', (['sy'], {}), '(sy)\n', (17538, 17542), False, 'import random, statistics\n'), ((17563, 17582), 'statistics.mean', 'statistics.mean', (['sy'], {}), '(sy)\n', (17578, 17582), False, 'import random, statistics\n'), ((5598, 5609), 'time.time', 'time.time', ([], {}), '()\n', (5607, 5609), False, 'import time\n'), ((7224, 7235), 'time.time', 'time.time', ([], {}), '()\n', (7233, 7235), False, 'import time\n'), ((12210, 12221), 'time.time', 'time.time', ([], {}), '()\n', (12219, 12221), False, 'import time\n')] |
import os
import torch
import itertools
import numpy as np
from os.path import join
from torch.autograd import Variable
EOS = '<eos>'
UNK = '<unk>'
def tokenize(str_, add_bos=False, add_eos=False):
words = []
if add_bos:
words += [EOS]
words += str_.split()
if add_eos:
words += [EOS]
return words
class Dictionary(object):
def __init__(self):
self.word2idx = {UNK:0}
self.idx2word = [UNK]
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
@property
def bos_id(self):
return self.word2idx[EOS]
@property
def eos_id(self):
return self.word2idx[EOS]
@property
def unk_id(self):
return self.word2idx[UNK]
def tokenize_file(self, path):
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r') as f:
tokens = 0
            for line in f:
                if not line.strip():
                    continue
                words = tokenize(line, add_eos=True)
tokens += len(words)
for word in words:
self.add_word(word)
# Tokenize file content
with open(path, 'r') as f:
ids = torch.LongTensor(tokens)
token = 0
for line in f:
if not line.strip():
continue
words = tokenize(line, add_eos=True)
for word in words:
ids[token] = self.word2idx[word]
token += 1
return ids
def words_to_ids(self, words, cuda):
tt = torch.cuda if cuda else torch
ids = tt.LongTensor(len(words))
for i, word in enumerate(words):
ids[i] = self.word2idx.get(word, self.word2idx[UNK])
return ids
class Subset(object):
yields_sentences = False
def __init__(self, dictionary, path, cuda, rng=None):
del rng # Unused in this iterator
self.tokens = dictionary.tokenize_file(path)
self.eos_id = dictionary.eos_id
self.unk_id = dictionary.unk_id
self.cuda = cuda
self._last_bsz = None
def batchify(self, bsz):
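        # cache the batched tensor so repeated calls with the same batch size reuse it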
if self._last_bsz == bsz:
return self._last_batched_data
data = self.tokens
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).t().contiguous()
if self.cuda:
data = data.cuda()
self._last_bsz = bsz
self._last_batched_data = data
return data
def get_num_batches(self, bsz, bptt):
data = self.batchify(bsz)
return int(np.ceil((1.0 * data.size(0) - 1.0) / bptt))
def iter_epoch(self, bsz, bptt, evaluation=False):
data = self.batchify(bsz)
data_len = data.size(0)
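        # walk the token stream in chunks of length bptt; y is x shifted by one token (next-token targets)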
for i in range(0, data_len - 1, bptt):
seq_len = min(bptt, data_len - 1 - i)
x = Variable(data[i:i+seq_len], volatile=evaluation)
y = Variable(data[i+1:i+1+seq_len])
yield x, y, None
class SubsetBySentence():
yields_sentences = True
def __init__(self, dictionary, path, cuda, rng=None):
raw_data = dictionary.tokenize_file(path).numpy()
self.eos_id = dictionary.eos_id
self.unk_id = dictionary.unk_id
raw_data = np.split(raw_data, np.where(raw_data == self.eos_id)[0] + 1)
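        # split the flat token stream into sentences, cutting just after every eos token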
last = raw_data.pop()
assert last.shape[0] == 0
# Add 1 to sentence length because we want to have both an eos
# at the beginning and at the end.
max_sentence_len = max(sent.shape[0] for sent in raw_data) + 1
padded_data = np.zeros(
(len(raw_data), max_sentence_len+1), dtype='int64') + -1
for i, sent in enumerate(raw_data):
padded_data[i, 1:sent.shape[0]+1] = sent
padded_data[:, 0] = self.eos_id
tokens = torch.from_numpy(padded_data)
self.sentences = padded_data
self.cuda = cuda
self.rng = np.random.RandomState(rng)
def get_num_batches(self, bsz, bptt=None):
del bptt # unused
#return int(np.ceil(1.0 * self.sentences.shape[0] / bsz))
return int(1.0 * self.sentences.shape[0] / bsz) # always return batch of full size (`bsz`)
def iter_epoch(self, bsz, bptt=None, evaluation=False, ):
del bptt # unused
num_sentences = self.sentences.shape[0]
sentences = self.sentences
if not evaluation:
sentences = np.array(sentences)
self.rng.shuffle(sentences)
for i in range(0, num_sentences-bsz, bsz): # always return batch of full size (`bsz`)
batch = sentences[i:i+bsz]
seq_lens = (batch != -1).sum(1)
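            # order the batch by decreasing sentence length (longest sentences first)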
seq_lens_idx = np.argsort(-seq_lens)
seq_lens = seq_lens[seq_lens_idx]
batch = batch[seq_lens_idx, :]
max_len = seq_lens.max()
x = np.array(batch[:, :max_len].T)
x[x == -1] = self.eos_id
x = torch.from_numpy(x)
y = torch.from_numpy(batch[:, 1:max_len + 1].T).contiguous()
seq_lens = torch.from_numpy(seq_lens)
if self.cuda:
x = x.cuda()
y = y.cuda()
seq_lens = seq_lens.cuda()
x = Variable(x, volatile=evaluation)
y = Variable(y, volatile=evaluation)
seq_lens = Variable(seq_lens, volatile=evaluation)
yield x, y, seq_lens
class Wrapper(object):
def __init__(self, dictionary, path, cuda, rng=None):
self.class0 = SubsetBySentence(
dictionary, path + '.0', cuda, rng=rng)
self.class1 = SubsetBySentence(
dictionary, path + '.1', cuda, rng=rng)
def get_num_batches(self, bsz):
return min(self.class0.get_num_batches(bsz),
self.class1.get_num_batches(bsz))
def iter_epoch(self, bsz, evaluation=False):
return itertools.imap(zip,
self.class0.iter_epoch(bsz=bsz, evaluation=evaluation),
self.class1.iter_epoch(bsz=bsz, evaluation=evaluation))
class Corpus(object):
def __init__(self, path, cuda, rng=None):
self.dictionary = Dictionary()
self.train = Wrapper(self.dictionary, path + 'train', cuda, rng=rng)
self.valid = Wrapper(self.dictionary, path +'dev', cuda, rng=rng)
self.test = Wrapper(self.dictionary, path + 'test', cuda, rng=rng)
| [
"os.path.exists",
"numpy.where",
"torch.LongTensor",
"torch.from_numpy",
"numpy.argsort",
"numpy.array",
"torch.autograd.Variable",
"numpy.random.RandomState"
] | [((1012, 1032), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1026, 1032), False, 'import os\n'), ((4290, 4319), 'torch.from_numpy', 'torch.from_numpy', (['padded_data'], {}), '(padded_data)\n', (4306, 4319), False, 'import torch\n'), ((4402, 4428), 'numpy.random.RandomState', 'np.random.RandomState', (['rng'], {}), '(rng)\n', (4423, 4428), True, 'import numpy as np\n'), ((1406, 1430), 'torch.LongTensor', 'torch.LongTensor', (['tokens'], {}), '(tokens)\n', (1422, 1430), False, 'import torch\n'), ((3327, 3377), 'torch.autograd.Variable', 'Variable', (['data[i:i + seq_len]'], {'volatile': 'evaluation'}), '(data[i:i + seq_len], volatile=evaluation)\n', (3335, 3377), False, 'from torch.autograd import Variable\n'), ((3392, 3429), 'torch.autograd.Variable', 'Variable', (['data[i + 1:i + 1 + seq_len]'], {}), '(data[i + 1:i + 1 + seq_len])\n', (3400, 3429), False, 'from torch.autograd import Variable\n'), ((4895, 4914), 'numpy.array', 'np.array', (['sentences'], {}), '(sentences)\n', (4903, 4914), True, 'import numpy as np\n'), ((5161, 5182), 'numpy.argsort', 'np.argsort', (['(-seq_lens)'], {}), '(-seq_lens)\n', (5171, 5182), True, 'import numpy as np\n'), ((5326, 5356), 'numpy.array', 'np.array', (['batch[:, :max_len].T'], {}), '(batch[:, :max_len].T)\n', (5334, 5356), True, 'import numpy as np\n'), ((5410, 5429), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (5426, 5429), False, 'import torch\n'), ((5526, 5552), 'torch.from_numpy', 'torch.from_numpy', (['seq_lens'], {}), '(seq_lens)\n', (5542, 5552), False, 'import torch\n'), ((5696, 5728), 'torch.autograd.Variable', 'Variable', (['x'], {'volatile': 'evaluation'}), '(x, volatile=evaluation)\n', (5704, 5728), False, 'from torch.autograd import Variable\n'), ((5745, 5777), 'torch.autograd.Variable', 'Variable', (['y'], {'volatile': 'evaluation'}), '(y, volatile=evaluation)\n', (5753, 5777), False, 'from torch.autograd import Variable\n'), ((5801, 5840), 'torch.autograd.Variable', 'Variable', (['seq_lens'], {'volatile': 'evaluation'}), '(seq_lens, volatile=evaluation)\n', (5809, 5840), False, 'from torch.autograd import Variable\n'), ((3744, 3777), 'numpy.where', 'np.where', (['(raw_data == self.eos_id)'], {}), '(raw_data == self.eos_id)\n', (3752, 3777), True, 'import numpy as np\n'), ((5446, 5489), 'torch.from_numpy', 'torch.from_numpy', (['batch[:, 1:max_len + 1].T'], {}), '(batch[:, 1:max_len + 1].T)\n', (5462, 5489), False, 'import torch\n')] |
"""
The following module contains three functions each of
which answers the questions in the challenge, the functions
in this module are written in the order they are requested
in the challenge document.
Written by: <NAME>
"""
from urllib.parse import urlparse
import requests
def filter_pokemon_by_names(base_api_url: str, pokemon_species_url: str) -> int:
"""Function to get the pokemon filtered by names
as specified in the challenge.
The number of species of pokemon's base form is
taken into account for this task.
Args:
base_api_url (str): URL to get pokemon data from pokeAPI.
pokemon_species_url (str): URL to get pokemon's base
form data from pokeAPI.
Returns:
        int: Number of pokemon that exist matching the name specification.
"""
pokemon_set = set()
pokemon_count = requests.get(url=pokemon_species_url).json()['count']
pokemon_species_data = requests.get(url=base_api_url + str(pokemon_count)).json()
pokemon_names = [pokemon['name'] for pokemon in pokemon_species_data['results']]
for pokemon in pokemon_names:
if ("at" in pokemon) and (pokemon.count('a') == 2):
pokemon_set.add(pokemon)
return len(pokemon_set)
def breeding_compatibility(pokemon_species_url: str, pokemon_name: str) -> int:
"""Function to get the number of breeding matches
for a specified pokemon.
Args:
pokemon_species_url (str): URL to get pokemon's base
form data from pokeAPI.
pokemon_name (str): Pokemon name to search for breeding matches
Returns:
int: Number of breeding matches for specified pokemon.
"""
matches = set()
pokemon_egg_group_data = requests.get(url=pokemon_species_url + pokemon_name)\
.json()['egg_groups']
egg_groups_urls = [egg_group['url'] for egg_group in pokemon_egg_group_data]
for url in egg_groups_urls:
egg_group_data = requests.get(url=url).json()
matches |= {poke['name'] for poke in egg_group_data['pokemon_species']}
return len(matches)
def minmax_weight_by_type(pokemon_type_url: str, type_name: str) -> list:
"""Function to get the maximum and minimum weight for
a specified pokemon type of the first generation (Kanto).
Args:
pokemon_type_url (str): URL to get pokemon types information.
type_name (str): Type of pokemon to search for max and min weight.
Returns:
        list: List with two elements which are the maximum and minimum weight
of the specified type of pokemon.
"""
weights_list = list()
max_min_weight = list()
pokemon_type_info = requests.get(url=pokemon_type_url + type_name).json()
pokemon_info = [pokemon['pokemon'] for pokemon in pokemon_type_info['pokemon']]
for pokemon in pokemon_info:
parsed_pokemon_url = urlparse(pokemon['url'])
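        # the numeric ID at the end of the URL path; IDs 1-151 are the first-generation (Kanto) pokemon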
if int(parsed_pokemon_url.path.rsplit("/", 2)[-2]) <= 151:
pokemon_data = requests.get(url=pokemon['url']).json()
weights_list.append(pokemon_data['weight'])
max_min_weight.append(max(weights_list))
max_min_weight.append(min(weights_list))
return max_min_weight
if __name__ == "__main__":
BASE_POKEMON_API_URL = "https://pokeapi.co/api/v2/pokemon/?limit="
POKEMON_SPECIES_API_URL = "https://pokeapi.co/api/v2/pokemon-species/"
POKEMON_TYPES_API_URL = "https://pokeapi.co/api/v2/type/"
ANSWER_QUESTION_1 = filter_pokemon_by_names(BASE_POKEMON_API_URL, POKEMON_SPECIES_API_URL)
ANSWER_QUESTION_2 = breeding_compatibility(POKEMON_SPECIES_API_URL, 'raichu')
ANSWER_QUESTION_3 = minmax_weight_by_type(POKEMON_TYPES_API_URL, 'fighting')
print(f"Pokemon number containing \"at\" and two \"a\" in their names: {ANSWER_QUESTION_1} \n"
f"The number of pokemon raichu can breed with is: {ANSWER_QUESTION_2} \n"
f"Maximum and minimun weight of G1 fighting type pokemon: {ANSWER_QUESTION_3}")
| [
"urllib.parse.urlparse",
"requests.get"
] | [((2833, 2857), 'urllib.parse.urlparse', 'urlparse', (["pokemon['url']"], {}), "(pokemon['url'])\n", (2841, 2857), False, 'from urllib.parse import urlparse\n'), ((2633, 2679), 'requests.get', 'requests.get', ([], {'url': '(pokemon_type_url + type_name)'}), '(url=pokemon_type_url + type_name)\n', (2645, 2679), False, 'import requests\n'), ((843, 880), 'requests.get', 'requests.get', ([], {'url': 'pokemon_species_url'}), '(url=pokemon_species_url)\n', (855, 880), False, 'import requests\n'), ((1703, 1755), 'requests.get', 'requests.get', ([], {'url': '(pokemon_species_url + pokemon_name)'}), '(url=pokemon_species_url + pokemon_name)\n', (1715, 1755), False, 'import requests\n'), ((1925, 1946), 'requests.get', 'requests.get', ([], {'url': 'url'}), '(url=url)\n', (1937, 1946), False, 'import requests\n'), ((2952, 2984), 'requests.get', 'requests.get', ([], {'url': "pokemon['url']"}), "(url=pokemon['url'])\n", (2964, 2984), False, 'import requests\n')] |
from django.db import models
class CookieConsentSettings(models.Model):
message = models.TextField(
default="This website uses cookies to ensure you get the best experience on our website."
)
button_text = models.CharField(
default="Got it!",
max_length=255
)
cookie_policy_link = models.CharField(
max_length=255
)
cookie_policy_link_text = models.CharField(
default="Learn more",
max_length=255
)
banner_colour = models.CharField(
default="#252e39",
max_length=255
)
banner_text_colour = models.CharField(
default="#ffffff",
max_length=255
)
button_colour = models.CharField(
default="#3acdf6",
max_length=255
)
button_text_colour = models.CharField(
default="#ffffff",
max_length=255
)
| [
"django.db.models.TextField",
"django.db.models.CharField"
] | [((88, 205), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""This website uses cookies to ensure you get the best experience on our website."""'}), "(default=\n 'This website uses cookies to ensure you get the best experience on our website.'\n )\n", (104, 205), False, 'from django.db import models\n'), ((229, 280), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""Got it!"""', 'max_length': '(255)'}), "(default='Got it!', max_length=255)\n", (245, 280), False, 'from django.db import models\n'), ((329, 361), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (345, 361), False, 'from django.db import models\n'), ((407, 461), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""Learn more"""', 'max_length': '(255)'}), "(default='Learn more', max_length=255)\n", (423, 461), False, 'from django.db import models\n'), ((505, 556), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""#252e39"""', 'max_length': '(255)'}), "(default='#252e39', max_length=255)\n", (521, 556), False, 'from django.db import models\n'), ((605, 656), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""#ffffff"""', 'max_length': '(255)'}), "(default='#ffffff', max_length=255)\n", (621, 656), False, 'from django.db import models\n'), ((700, 751), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""#3acdf6"""', 'max_length': '(255)'}), "(default='#3acdf6', max_length=255)\n", (716, 751), False, 'from django.db import models\n'), ((800, 851), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""#ffffff"""', 'max_length': '(255)'}), "(default='#ffffff', max_length=255)\n", (816, 851), False, 'from django.db import models\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import forest
def get_exercises():
exercises = []
for i in xrange(5):
exercises.append('')
exercises[0]="Подтягивания"
exercises[1]="Отжимания"
exercises[2]="Приседания"
exercises[3]="Пресс"
exercises[4]="Отжимания на брусьях"
return exercises
def get_locations():
locations = []
for i in xrange(18):
locations.append('')
locations[0]="Зелёный перелесок"
locations[1]="Цветущий луг"
locations[2]="Опушка"
locations[3]="Берёзовая роща"
locations[4]="Овраг"
locations[5]="Болото"
locations[6]="Речка"
locations[7]="Разнотравие"
locations[8]="Ручей"
locations[9]="Кустарники"
locations[10]="Родник"
locations[11]="Южный склон холма"
locations[12]="Холм"
locations[13]="Перелесок"
locations[14]="Северный склон холма"
locations[15]="Лес"
locations[16]="Сосновый бор"
locations[17]="Бурелом"
return locations
def get_skill_groups():
"""
название | тип навыка | тип цели
тип навыка:
0 - пассивный
1 - усиление
2 - активный)
тип цели:
TARGET_TYPE_SELF = 0
TARGET_TYPE_SINGLE_ACTIVE_FROM_MY_TEAM = 1
TARGET_TYPE_SINGLE_INACTIVE_FROM_MY_TEAM = 2
TARGET_TYPE_SINGLE_ACTIVE_FROM_TEAMMATES = 3
TARGET_TYPE_ACTIVE_MY_TEAM = 4
TARGET_TYPE_SINGLE_ACTIVE_FROM_OPPOSITE_TEAM = 5
TARGET_TYPE_SINGLE_INACTIVE_FROM_OPPOSITE_TEAM = 6
TARGET_TYPE_ACTIVE_OPPOSITE_TEAM = 7
TARGET_TYPE_ACTIVE_ALL = 8
"""
SKILL_TYPE_PASSIVE = 0
SKILL_TYPE_GAIN = 1
SKILL_TYPE_ACTIVE = 2
TARGET_TYPE_SELF = 0
TARGET_TYPE_SINGLE_ACTIVE_FROM_MY_TEAM = 1
TARGET_TYPE_SINGLE_INACTIVE_FROM_MY_TEAM = 2
TARGET_TYPE_SINGLE_ACTIVE_FROM_TEAMMATES = 3
TARGET_TYPE_ACTIVE_MY_TEAM = 4
TARGET_TYPE_SINGLE_ACTIVE_FROM_OPPOSITE_TEAM = 5
TARGET_TYPE_SINGLE_INACTIVE_FROM_OPPOSITE_TEAM = 6
TARGET_TYPE_ACTIVE_OPPOSITE_TEAM = 7
TARGET_TYPE_ACTIVE_ALL = 8
skill_groups = []
for i in xrange(17):
skill_groups.append('')
skill_groups[0] = 'Увеличение сопротивления|{}|{}'.format(SKILL_TYPE_PASSIVE, TARGET_TYPE_SELF)
skill_groups[1] = 'Увеличение фитнес очков|{}|{}'.format(SKILL_TYPE_PASSIVE, TARGET_TYPE_SELF)
skill_groups[2] = 'Увеличение множителя ФО|{}|{}'.format(SKILL_TYPE_PASSIVE, TARGET_TYPE_SELF)
skill_groups[3] = 'Увеличение бонуса|{}|{}'.format(SKILL_TYPE_PASSIVE, TARGET_TYPE_SELF)
skill_groups[4] = 'Увеличение множителя на 1 ход|{}|{}'.format(SKILL_TYPE_GAIN, TARGET_TYPE_SELF)
skill_groups[5] = 'Увеличение сопротивления на 1 ход|{}|{}'.format(SKILL_TYPE_GAIN, TARGET_TYPE_SELF)
skill_groups[6] = 'Восстановление своих ФО|{}|{}'.format(SKILL_TYPE_ACTIVE, TARGET_TYPE_SELF)
skill_groups[7] = 'Увеличение максимума ФО команды|{}|{}'.format(SKILL_TYPE_GAIN, TARGET_TYPE_ACTIVE_MY_TEAM)
skill_groups[8] = 'Увеличение сопротивления команды|{}|{}'.format(SKILL_TYPE_GAIN, TARGET_TYPE_ACTIVE_MY_TEAM)
skill_groups[9] = 'Увеличение множителя команды|{}|{}'.format(SKILL_TYPE_GAIN, TARGET_TYPE_ACTIVE_MY_TEAM)
skill_groups[10] = 'Увеличение бонуса команды|{}|{}'.format(SKILL_TYPE_GAIN, TARGET_TYPE_ACTIVE_MY_TEAM)
skill_groups[11] = 'Базовое увеличение регенерации|{}|{}'.format(SKILL_TYPE_PASSIVE, TARGET_TYPE_SELF)
skill_groups[12] = 'Базовое увеличение множителя|{}|{}'.format(SKILL_TYPE_PASSIVE, TARGET_TYPE_SELF)
skill_groups[13] = 'Базовое увеличение сопротивления|{}|{}'.format(SKILL_TYPE_PASSIVE, TARGET_TYPE_SELF)
skill_groups[14] = 'Воздействие на всю команду соперников|{}|{}'.format(SKILL_TYPE_ACTIVE, TARGET_TYPE_ACTIVE_OPPOSITE_TEAM)
skill_groups[15] = 'Восстановление ФО своей команды|{}|{}'.format(SKILL_TYPE_ACTIVE, TARGET_TYPE_ACTIVE_MY_TEAM)
skill_groups[16] = 'Восстановление ФО союзника|{}|{}'.format(SKILL_TYPE_ACTIVE, TARGET_TYPE_SINGLE_ACTIVE_FROM_MY_TEAM)
#skill_groups[11] = 'Возвращение игрока в игру|{}|{}'
return skill_groups
def get_characters():
names = []
for i in xrange(forest.get_locations_count()):
names.append([])
for j in xrange(forest.get_max_character_position()):
names[i].append('')
names[0][0]="<NAME>жик"
names[0][1]="Ёж"
names[0][2]="Старший ёж"
names[0][3]="Ёжик в кепке"
names[0][4]="Ежище"
names[1][0]="<NAME>-русак"
names[1][1]="Заяц-русак"
names[1][2]="Сильный заяц-русак"
names[1][3]="Опытный заяц-русак"
names[1][4]="<NAME>"
names[2][0]="Лисёнок"
names[2][1]="Молодой лис"
names[2][2]="Лис"
names[2][3]="Опытный лис"
names[2][4]="Лис-главарь"
names[3][0]="Волчонок"
names[3][1]="Молодой волк"
names[3][2]="Волк"
names[3][3]="Матёрый волк"
names[3][4]="Волк-вожак"
names[4][0]="Медвежонок"
names[4][1]="<NAME>"
names[4][2]="Медведь-лежебока"
names[4][3]="Медведь"
names[4][4]="Гигантский медведь"
names[5][0]="<NAME>"
names[5][1]="Куница"
names[5][2]="Сильная куница"
names[5][3]="Опытная куница"
names[5][4]="Предводительница куниц"
names[6][0]="<NAME>"
names[6][1]="Ленивый барсук"
names[6][2]="Барсук"
names[6][3]="Барсук-атлет"
names[6][4]="<NAME>"
names[7][0]="<NAME>"
names[7][1]="Бурундук"
names[7][2]="Сильный бурундук"
names[7][3]="Опытный бурундук"
names[7][4]="<NAME>"
names[8][0]="Бельчонок"
names[8][1]="<NAME>"
names[8][2]="Белка"
names[8][3]="Сыт<NAME>"
names[8][4]="Белка-сопелка"
names[9][0]="Лосёнок"
names[9][1]="<NAME>"
names[9][2]="Лосиха"
names[9][3]="Лось"
names[9][4]="Вож<NAME>"
names[10][0]="Молод<NAME>"
names[10][1]="Зубр"
names[10][2]="Зубр-мыслитель"
names[10][3]="Сильный зубр"
names[10][4]="<NAME>"
names[11][0]="Молодой кабан"
names[11][1]="Суровый кабан"
names[11][2]="Кабан"
names[11][3]="Сильный кабан"
names[11][4]="Главный кабан"
names[12][0]="Молодая выдра"
names[12][1]="Выдра"
names[12][2]="Выдра-мыслитель"
names[12][3]="Сильная выдра"
names[12][4]="Старшая выдра"
names[13][0]="Молодой енот"
names[13][1]="Енот"
names[13][2]="Енот-сказочник"
names[13][3]="Сильный енот"
names[13][4]="Предводитель енотов"
names[14][0]="Молодая рысь"
names[14][1]="Рысь"
names[14][2]="Рысь-попрыгунья"
names[14][3]="Сильная рысь"
names[14][4]="Крупная рысь"
names[15][0]="Молодая куропатка"
names[15][1]="Куропатка"
names[15][2]="Куропатка-экстремалка"
names[15][3]="Сильная куропатка"
names[15][4]="Вожак куропаток"
names[16][0]="Молодой сайгак"
names[16][1]="Сайгак-болтун"
names[16][2]="Сайгак"
names[16][3]="<NAME>"
names[16][4]="<NAME>"
names[17][0]="Оленёнок"
names[17][1]="<NAME>"
names[17][2]="Олень"
names[17][3]="<NAME>"
names[17][4]="Олень-вожак"
return names
def get_extra_character_name(name, id):
extras = []
extras.append(''); # id = 0
extras.append('-защитник'); # id = 1
extras.append('-лекарь'); # id = 2
extras.append('-спортсмен'); # id = 3
return '{}{}'.format(name, extras[id % 4])
def get_achievements():
achievements = []
for i in xrange(12):
achievements.append({})
achievements[0] = ("quests_count", "Выполнить задания")
achievements[1] = ("total_result", "Общий результат")
achievements[2] = ("total_number_of_moves", "Сделать несколько подходов")
achievements[3] = ("max_competition_result", "Рекорд соревнования")
achievements[4] = ("competitions", "Поучаствовать в соревнованиях")
achievements[5] = ("wins", "Победить в соревнованиях")
achievements[6] = ("training_days", "Дни тренировок")
achievements[7] = ("max_weekly_result", "Максимум за неделю")
achievements[8] = ("max_monthly_result", "Максимум за месяц")
achievements[9] = ("weekly_greater_100_periods_cnt", "Неделя с результатом больше 100")
achievements[10] = ("weekly_greater_300_periods_cnt", "Неделя с результатом больше 300")
achievements[11] = ("weekly_greater_700_periods_cnt", "Неделя с результатом больше 700")
return achievements
def get_knowledge_categories():
arr = []
for i in xrange(6):
arr.append('')
arr[0] = ("Общая информация",10)
arr[1] = ("Характеристики персонажа", 20)
arr[2] = ("Специализация", 30)
arr[3] = ("Соревнования", 40)
arr[4] = ("Навыки", 50)
arr[5] = ("Часто задаваемые вопросы", 60)
return arr | [
"forest.get_max_character_position",
"forest.get_locations_count"
] | [((3879, 3907), 'forest.get_locations_count', 'forest.get_locations_count', ([], {}), '()\n', (3905, 3907), False, 'import forest\n'), ((3947, 3982), 'forest.get_max_character_position', 'forest.get_max_character_position', ([], {}), '()\n', (3980, 3982), False, 'import forest\n')] |
from typing import Any, Dict, List, Tuple
from abc import ABC, abstractmethod
import gym
import numpy as np
import pybullet as p
import pybullet_data as pbd
import pybullet_utils.bullet_client as bc
import random
from gym_solo.core import termination as terms
from gym_solo.core import configs
from gym_solo.core import obs
from gym_solo.core import rewards
import gym_solo.solo_types as solo_types
class Solo8BaseEnv(ABC, gym.Env):
"""Solo 8 abstract base environment."""
metadata = {'render.modes': ['rgb_array']}
def __init__(self, config: configs.Solo8BaseConfig, use_gui: bool,
normalize_observations: bool = False):
"""Create a solo8 env.
Args:
config (configs.Solo8BaseConfig): The SoloConfig. Defaults to None.
use_gui (bool): Whether or not to show the pybullet GUI.
normalize_observation (bool): Normalize the observations? Defaults to
True
"""
self.config = config
self.client = bc.BulletClient(
connection_mode=p.GUI if use_gui else p.DIRECT)
self.client.setAdditionalSearchPath(pbd.getDataPath())
self.client.setGravity(*self.config.gravity)
if self.config.dt:
self.client.setPhysicsEngineParameter(fixedTimeStep=self.config.dt,
numSubSteps=1)
else:
self.client.setRealTimeSimulation(1)
self.client_configuration()
self.plane = self.client.loadURDF('plane.urdf')
self.load_bodies()
self.obs_factory = obs.ObservationFactory(self.client,
normalize=normalize_observations)
self.reward_factory = rewards.RewardFactory(self.client)
self.termination_factory = terms.TerminationFactory()
self.reset(init_call=True)
@abstractmethod
def load_bodies(self):
"""Load the bodies into the environment.
Note that a plane has already been loaded in and the entire environment
is encapsulated within the self.client object. Thus, all bodies should
be added via the self.client interface.
"""
pass
@property
@abstractmethod
def action_space(self) -> gym.Space:
"""Get the action space of the agent.
Returns:
gym.Space: A Space representing the domain of valid moves for the
agent.
"""
pass
@abstractmethod
def reset(self, init_call: bool = False):
"""Reset the environment.
For best results, this method should be deterministic; i.e. the environment
should return to the same state everytime this method is called.
Args:
init_call (bool, optional): If this function is being called from the init
function. Defaults to False.
"""
pass
@abstractmethod
def step(self, action: List[float]) -> Tuple[solo_types.obs, float, bool,
Dict[Any, Any]]:
"""Have the agent apply the action in the environment.
Args:
action (List[float]): The action for the agent to take. Requires that
this conforms to self.action_space.
Returns:
Tuple[solo_types.obs, float, bool, Dict[Any, Any]]: A tuple of the next
observation, the reward for that step, whether or not the episode
terminates, and an info dict for misc diagnostic details.
"""
pass
@property
def observation_space(self) -> gym.Space:
"""Get the agent's observation space.
Returns:
gym.Space: The agent's observation space.
"""
return self.obs_factory.get_observation_space()
def render(self, mode='rgb_array') -> List[List[List[int]]]:
"""Render the current state of the Solo 8 env.
Note that the camera is controlled by the `config` option passed in at
construction time.
Args:
mode (str, optional): Rendering mode. Refer to OpenAI Gym documentation.
Defaults to 'rgb_array'.
Returns:
List[List[List[int]]]: a 2D list of RGBA pixel data.
"""
proj_matrix = self.client.computeProjectionMatrixFOV(
self.config.render_fov, self.config.render_aspect,
0.01, 100)
view_matrix = self.client.computeViewMatrixFromYawPitchRoll(
self.config.render_pos, self.config.render_cam_distance,
self.config.render_yaw, self.config.render_pitch,
self.config.render_roll, 2)
w, h, rgb, _, _ = self.client.getCameraImage(
self.config.render_width, self.config.render_height, view_matrix,
proj_matrix)
# 4 Channels for RGBA
return np.reshape(rgb, (h, w, 4))
def client_configuration(self):
"""An overridable method if a child class needs to directly interact
with the PyBullet env at init time.
"""
pass
def _close(self):
"""Soft shutdown the environment."""
self.client.disconnect()
def _seed(self, seed: int) -> None:
"""Set the seeds for random and numpy
Args:
seed (int): The seed to set
"""
np.random.seed(seed)
random.seed(seed) | [
"numpy.reshape",
"gym_solo.core.termination.TerminationFactory",
"pybullet_data.getDataPath",
"random.seed",
"numpy.random.seed",
"pybullet_utils.bullet_client.BulletClient",
"gym_solo.core.obs.ObservationFactory",
"gym_solo.core.rewards.RewardFactory"
] | [((964, 1027), 'pybullet_utils.bullet_client.BulletClient', 'bc.BulletClient', ([], {'connection_mode': '(p.GUI if use_gui else p.DIRECT)'}), '(connection_mode=p.GUI if use_gui else p.DIRECT)\n', (979, 1027), True, 'import pybullet_utils.bullet_client as bc\n'), ((1487, 1556), 'gym_solo.core.obs.ObservationFactory', 'obs.ObservationFactory', (['self.client'], {'normalize': 'normalize_observations'}), '(self.client, normalize=normalize_observations)\n', (1509, 1556), False, 'from gym_solo.core import obs\n'), ((1630, 1664), 'gym_solo.core.rewards.RewardFactory', 'rewards.RewardFactory', (['self.client'], {}), '(self.client)\n', (1651, 1664), False, 'from gym_solo.core import rewards\n'), ((1696, 1722), 'gym_solo.core.termination.TerminationFactory', 'terms.TerminationFactory', ([], {}), '()\n', (1720, 1722), True, 'from gym_solo.core import termination as terms\n'), ((4467, 4493), 'numpy.reshape', 'np.reshape', (['rgb', '(h, w, 4)'], {}), '(rgb, (h, w, 4))\n', (4477, 4493), True, 'import numpy as np\n'), ((4888, 4908), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4902, 4908), True, 'import numpy as np\n'), ((4913, 4930), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (4924, 4930), False, 'import random\n'), ((1075, 1092), 'pybullet_data.getDataPath', 'pbd.getDataPath', ([], {}), '()\n', (1090, 1092), True, 'import pybullet_data as pbd\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'client.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(850, 671)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(20, 70, 54, 12))
self.label.setObjectName("label")
self.pc1 = QtWidgets.QLabel(self.centralwidget)
self.pc1.setGeometry(QtCore.QRect(10, 90, 111, 151))
self.pc1.setText("")
self.pc1.setPixmap(QtGui.QPixmap("Pic/53.GIF"))
self.pc1.setObjectName("pc1")
self.pc2 = QtWidgets.QLabel(self.centralwidget)
self.pc2.setGeometry(QtCore.QRect(140, 90, 111, 151))
self.pc2.setText("")
self.pc2.setPixmap(QtGui.QPixmap("Pic/53.GIF"))
self.pc2.setObjectName("pc2")
self.pc3 = QtWidgets.QLabel(self.centralwidget)
self.pc3.setGeometry(QtCore.QRect(270, 90, 111, 151))
self.pc3.setText("")
self.pc3.setPixmap(QtGui.QPixmap("Pic/53.GIF"))
self.pc3.setObjectName("pc3")
self.pc4 = QtWidgets.QLabel(self.centralwidget)
self.pc4.setGeometry(QtCore.QRect(400, 90, 111, 151))
self.pc4.setText("")
self.pc4.setPixmap(QtGui.QPixmap("Pic/53.GIF"))
self.pc4.setObjectName("pc4")
self.pc5 = QtWidgets.QLabel(self.centralwidget)
self.pc5.setGeometry(QtCore.QRect(530, 90, 111, 151))
self.pc5.setText("")
self.pc5.setPixmap(QtGui.QPixmap("Pic/53.GIF"))
self.pc5.setObjectName("pc5")
self.public_cards = [self.pc1, self.pc2, self.pc3, self.pc4, self.pc5]
self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
self.tableWidget.setGeometry(QtCore.QRect(10, 290, 711, 331))
self.tableWidget.setAutoFillBackground(False)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(8)
self.tableWidget.setHorizontalHeaderLabels(['昵称', 'id', '状态', '下注', '财富', '牌1', '牌2', '最大牌型'])
self.label_bb = QtWidgets.QLabel(self.centralwidget)
self.label_bb.setGeometry(QtCore.QRect(20, 30, 71, 16))
self.label_bb.setObjectName("label_bb")
self.label_leftplayers = QtWidgets.QLabel(self.centralwidget)
self.label_leftplayers.setGeometry(QtCore.QRect(140, 30, 101, 16))
self.label_leftplayers.setObjectName("label_leftplayers")
self.label_pot = QtWidgets.QLabel(self.centralwidget)
self.label_pot.setGeometry(QtCore.QRect(280, 30, 101, 16))
self.label_pot.setObjectName("label_pot")
self.label_maxbet = QtWidgets.QLabel(self.centralwidget)
self.label_maxbet.setGeometry(QtCore.QRect(390, 30, 101, 16))
self.label_maxbet.setObjectName("label_maxbet")
self.label_timer = QtWidgets.QLabel(self.centralwidget)
self.label_timer.setStyleSheet("font:30pt '楷体';color: rgb(255, 255, 255);")
self.label_timer.setAlignment(QtCore.Qt.AlignCenter)
self.label_timer.setGeometry(QtCore.QRect(730, 20, 40, 50))
self.widget_action = QtWidgets.QWidget(self.centralwidget)
self.widget_action.setGeometry(QtCore.QRect(660, 90, 191, 141))
self.widget_action.setObjectName("widget_action")
self.verticalLayoutWidget = QtWidgets.QWidget(self.widget_action)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 40, 171, 91))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.pushButton_fold = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton_fold.setObjectName("pushButton_fold")
self.verticalLayout.addWidget(self.pushButton_fold)
self.pushButton_checkcall = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton_checkcall.setObjectName("pushButton_checkcall")
self.verticalLayout.addWidget(self.pushButton_checkcall)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.lineEdit_raise = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.lineEdit_raise.setObjectName("lineEdit_raise")
self.horizontalLayout.addWidget(self.lineEdit_raise)
self.pushButton_raise = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton_raise.setObjectName("pushButton_raise")
self.horizontalLayout.addWidget(self.pushButton_raise)
self.verticalLayout.addLayout(self.horizontalLayout)
self.label_2 = QtWidgets.QLabel(self.widget_action)
self.label_2.setGeometry(QtCore.QRect(70, 10, 48, 20))
self.label_2.setObjectName("label_2")
font = QtGui.QFont()
font.setPointSize(20)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 850, 23))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def update_table(self, row_num):
width = 50
height = 60
self.tableWidget.setRowCount(row_num)
self.tableWidget.setIconSize(QtCore.QSize(width, height))
for i in range(5, 7): # 让列宽和图片相同
self.tableWidget.setColumnWidth(i, width)
for i in range(row_num): # 让行高和图片相同
self.tableWidget.setRowHeight(i, height)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "德州扑克好友版"))
self.label.setText(_translate("MainWindow", "公共牌"))
__sortingEnabled = self.tableWidget.isSortingEnabled()
self.tableWidget.setSortingEnabled(False)
self.tableWidget.setSortingEnabled(__sortingEnabled)
self.label_bb.setText(_translate("MainWindow", "大盲位id:"))
self.label_leftplayers.setText(_translate("MainWindow", "剩余玩家数:"))
self.label_pot.setText(_translate("MainWindow", "底池:"))
self.label_maxbet.setText(_translate("MainWindow", "当前最大下注:"))
self.pushButton_fold.setText(_translate("MainWindow", "弃牌"))
self.pushButton_checkcall.setText(_translate("MainWindow", "跟注/Check"))
self.pushButton_raise.setText(_translate("MainWindow", "加注"))
self.label_2.setText(_translate("MainWindow", "采取动作"))
self.widget_action.setVisible(False)
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow
class MyMainForm(QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
super(MyMainForm, self).__init__(parent)
self.setupUi(self)
if __name__ == "__main__":
#固定的,PyQt5程序都需要QApplication对象。sys.argv是命令行参数列表,确保程序可以双击运行
app = QApplication(sys.argv)
#初始化
myWin = MyMainForm()
palette1 = QtGui.QPalette()
palette1.setColor(QtGui.QPalette.Background, QtGui.QColor(192, 253, 123)) # 设置背景颜色
# palette1.setBrush(self.backgroundRole(), QtGui.QBrush(QtGui.QPixmap('../../../Document/images/17_big.jpg'))) # 设置背景图片
myWin.setPalette(palette1)
#将窗口控件显示在屏幕上
myWin.show()
#程序运行,sys.exit方法确保程序完整退出。
sys.exit(app.exec_()) | [
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QTableWidget",
"PyQt5.QtGui.QPalette",
"PyQt5.QtWidgets.QLineEdit",
"PyQt5.QtGui.QFont",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtGui.QColor",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtCore.QRect",
"PyQt5.QtGui.QPixmap",
"PyQt5.QtWidgets.QStatusBar",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtCore.QSize",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QMenuBar"
] | [((7469, 7491), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (7481, 7491), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow\n'), ((7541, 7557), 'PyQt5.QtGui.QPalette', 'QtGui.QPalette', ([], {}), '()\n', (7555, 7557), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((415, 444), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (432, 444), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((525, 561), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (541, 561), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((684, 720), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (700, 720), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((924, 960), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (940, 960), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1165, 1201), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1181, 1201), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1406, 1442), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1422, 1442), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1647, 1683), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1663, 1683), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1975, 2017), 'PyQt5.QtWidgets.QTableWidget', 'QtWidgets.QTableWidget', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1997, 2017), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2367, 2403), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2383, 2403), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2549, 2585), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2565, 2585), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2752, 2788), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2768, 2788), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2934, 2970), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2950, 2970), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3125, 3161), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3141, 3161), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3405, 3442), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3422, 3442), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3609, 3646), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['self.widget_action'], {}), '(self.widget_action)\n', (3626, 3646), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3826, 3874), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (3847, 3874), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4025, 4073), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (4046, 4073), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4232, 
4280), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (4253, 4280), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4450, 4473), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (4471, 4473), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4568, 4614), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (4587, 4614), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4768, 4816), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (4789, 4816), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5028, 5064), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.widget_action'], {}), '(self.widget_action)\n', (5044, 5064), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5190, 5203), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (5201, 5203), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5314, 5344), 'PyQt5.QtWidgets.QMenuBar', 'QtWidgets.QMenuBar', (['MainWindow'], {}), '(MainWindow)\n', (5332, 5344), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5522, 5554), 'PyQt5.QtWidgets.QStatusBar', 'QtWidgets.QStatusBar', (['MainWindow'], {}), '(MainWindow)\n', (5542, 5554), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5701, 5750), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['MainWindow'], {}), '(MainWindow)\n', (5738, 5750), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7607, 7634), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(192)', '(253)', '(123)'], {}), '(192, 253, 123)\n', (7619, 7634), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((593, 621), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(70)', '(54)', '(12)'], {}), '(20, 70, 54, 12)\n', (605, 621), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((750, 780), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(90)', '(111)', '(151)'], {}), '(10, 90, 111, 151)\n', (762, 780), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((838, 865), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""Pic/53.GIF"""'], {}), "('Pic/53.GIF')\n", (851, 865), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((990, 1021), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(140)', '(90)', '(111)', '(151)'], {}), '(140, 90, 111, 151)\n', (1002, 1021), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1079, 1106), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""Pic/53.GIF"""'], {}), "('Pic/53.GIF')\n", (1092, 1106), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1231, 1262), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(270)', '(90)', '(111)', '(151)'], {}), '(270, 90, 111, 151)\n', (1243, 1262), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1320, 1347), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""Pic/53.GIF"""'], {}), "('Pic/53.GIF')\n", (1333, 1347), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1472, 1503), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(400)', '(90)', '(111)', '(151)'], {}), '(400, 90, 111, 151)\n', (1484, 1503), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1561, 1588), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""Pic/53.GIF"""'], {}), "('Pic/53.GIF')\n", (1574, 1588), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1713, 1744), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(530)', '(90)', 
'(111)', '(151)'], {}), '(530, 90, 111, 151)\n', (1725, 1744), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1802, 1829), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""Pic/53.GIF"""'], {}), "('Pic/53.GIF')\n", (1815, 1829), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2055, 2086), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(290)', '(711)', '(331)'], {}), '(10, 290, 711, 331)\n', (2067, 2086), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2438, 2466), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(30)', '(71)', '(16)'], {}), '(20, 30, 71, 16)\n', (2450, 2466), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2629, 2659), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(140)', '(30)', '(101)', '(16)'], {}), '(140, 30, 101, 16)\n', (2641, 2659), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2824, 2854), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(280)', '(30)', '(101)', '(16)'], {}), '(280, 30, 101, 16)\n', (2836, 2854), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3009, 3039), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(390)', '(30)', '(101)', '(16)'], {}), '(390, 30, 101, 16)\n', (3021, 3039), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3344, 3373), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(730)', '(20)', '(40)', '(50)'], {}), '(730, 20, 40, 50)\n', (3356, 3373), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3482, 3513), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(660)', '(90)', '(191)', '(141)'], {}), '(660, 90, 191, 141)\n', (3494, 3513), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3693, 3722), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(40)', '(171)', '(91)'], {}), '(10, 40, 171, 91)\n', (3705, 3722), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5098, 5126), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(70)', '(10)', '(48)', '(20)'], {}), '(70, 10, 48, 20)\n', (5110, 5126), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5378, 5405), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(850)', '(23)'], {}), '(0, 0, 850, 23)\n', (5390, 5405), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5911, 5938), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['width', 'height'], {}), '(width, height)\n', (5923, 5938), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
from argparse import ArgumentParser
import pytest
import zum
from zum.cli.core import dispatcher, get_config_file_name
from zum.constants import DEFAULT_CONFIG_FILE_NAME
class TestDispatcher:
def setup_method(self):
self.invalid_config_file_name = "invalid.toml"
self.valid_config_file_name = "valid.toml"
self.endpoint_name = "exmple"
self.configs = (
"[metadata]\n"
'server = "http://localhost:8000"\n'
f"[endpoints.{self.endpoint_name}]\n"
'route = "/example"\n'
'method = "get"\n'
)
def test_help_flag(self, capsys):
with pytest.raises(SystemExit):
dispatcher(["--help"])
captured = capsys.readouterr().out
assert "Command line interface tool for zum." in captured
def test_version_flag(self, capsys):
with pytest.raises(SystemExit):
dispatcher(["--version"])
captured = capsys.readouterr().out
assert f"zum version {zum.__version__}" in captured
def test_invalid_config_file(self, tmpdir, capsys):
invalid_config_file = tmpdir.join(self.invalid_config_file_name)
valid_config_file = tmpdir.join(self.valid_config_file_name)
with open(valid_config_file.strpath, "w") as raw_config_file:
raw_config_file.write(self.configs)
with pytest.raises(SystemExit):
dispatcher(["--file", invalid_config_file.strpath, "--help"])
captured = capsys.readouterr().out
assert "No config file" in captured
assert self.invalid_config_file_name in captured
def test_valid_config_file(self, tmpdir, capsys):
valid_config_file = tmpdir.join(self.valid_config_file_name)
with open(valid_config_file.strpath, "w") as raw_config_file:
raw_config_file.write(self.configs)
with pytest.raises(SystemExit):
dispatcher(["--file", valid_config_file.strpath, "--help"])
captured = capsys.readouterr().out
assert "No config file" not in captured
assert f"{{{self.endpoint_name}}}" in captured
class TestGetConfigFileName:
def setup_method(self):
self.file_name = "custom.toml"
def test_empty_call(self):
file_name = get_config_file_name()
assert file_name == DEFAULT_CONFIG_FILE_NAME
def test_filled_call(self):
file_name = get_config_file_name(["--file", self.file_name])
assert file_name == self.file_name
| [
"pytest.raises",
"zum.cli.core.dispatcher",
"zum.cli.core.get_config_file_name"
] | [((2270, 2292), 'zum.cli.core.get_config_file_name', 'get_config_file_name', ([], {}), '()\n', (2290, 2292), False, 'from zum.cli.core import dispatcher, get_config_file_name\n'), ((2399, 2447), 'zum.cli.core.get_config_file_name', 'get_config_file_name', (["['--file', self.file_name]"], {}), "(['--file', self.file_name])\n", (2419, 2447), False, 'from zum.cli.core import dispatcher, get_config_file_name\n'), ((647, 672), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (660, 672), False, 'import pytest\n'), ((686, 708), 'zum.cli.core.dispatcher', 'dispatcher', (["['--help']"], {}), "(['--help'])\n", (696, 708), False, 'from zum.cli.core import dispatcher, get_config_file_name\n'), ((873, 898), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (886, 898), False, 'import pytest\n'), ((912, 937), 'zum.cli.core.dispatcher', 'dispatcher', (["['--version']"], {}), "(['--version'])\n", (922, 937), False, 'from zum.cli.core import dispatcher, get_config_file_name\n'), ((1372, 1397), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (1385, 1397), False, 'import pytest\n'), ((1411, 1472), 'zum.cli.core.dispatcher', 'dispatcher', (["['--file', invalid_config_file.strpath, '--help']"], {}), "(['--file', invalid_config_file.strpath, '--help'])\n", (1421, 1472), False, 'from zum.cli.core import dispatcher, get_config_file_name\n'), ((1874, 1899), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (1887, 1899), False, 'import pytest\n'), ((1913, 1972), 'zum.cli.core.dispatcher', 'dispatcher', (["['--file', valid_config_file.strpath, '--help']"], {}), "(['--file', valid_config_file.strpath, '--help'])\n", (1923, 1972), False, 'from zum.cli.core import dispatcher, get_config_file_name\n')] |
import abc
import numpy as np
class Policy(object):
def __init__(self, config):
"""
Base class for all policies, has an abstract method predict().
"""
self.trainable = False
self.phase = None
self.model = None
self.device = None
self.last_state = None
self.time_step = None
# if agent is assumed to know the dynamics of real world
self.env = None
self.config = config
@abc.abstractmethod
def predict(self, state):
"""
Policy takes state as input and output an action
"""
return
@staticmethod
def reach_destination(state):
self_state = state.self_state
if np.linalg.norm((self_state.py - self_state.gy, self_state.px - self_state.gx)) < self_state.radius:
return True
else:
return False
| [
"numpy.linalg.norm"
] | [((725, 803), 'numpy.linalg.norm', 'np.linalg.norm', (['(self_state.py - self_state.gy, self_state.px - self_state.gx)'], {}), '((self_state.py - self_state.gy, self_state.px - self_state.gx))\n', (739, 803), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph construction for dual verification: Lagrangian calculation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sonnet as snt
import tensorflow as tf
class DualVerification(snt.AbstractModule):
"""Module to represent a network's Lagrangian, in terms of dual variables."""
def __init__(self, verification_strategy,
verifiable_layers,
target_strategy=None,
get_dual_variable=tf.get_variable,
name='dual_verification'):
"""Initialises the dual verification module.
Args:
verification_strategy: strategy object defining the dual verification
formulation, including what dual variables exist for each layer.
verifiable_layers: List of `VerifiableLayer` objects specifying
linear layers and non-linear activation functions.
target_strategy: target_objective_strategy object defining the objective
to optimize for the Lagrangian, default set to None, in which case we
will use the standard verification objective.
get_dual_variable: Function(name, shape, dtype) returning a dual variable.
It will be invoked by keyword arguments, so its arguments must be named
as given here, but may occur in any order.
name: Optional name for the module, defaulting to 'dual_verification'.
"""
super(DualVerification, self).__init__(name=name)
self._verification_strategy = verification_strategy
self._verifiable_layers = verifiable_layers
self._target_strategy = target_strategy
self._get_dual_variable = get_dual_variable
def _build(self, labels, num_batches, current_batch,
margin=0.,
objective_computation_config=None,
dataset_size=None):
"""Sets the up dual objective for the given network.
Dual variables are allocated for the entire dataset, covering all batches
as specified by `num_batches`. The dual objective accesses a slice of the
the dual variables specified by `current_batch`.
Args:
labels: 1D integer tensor of shape (batch_size) of labels for each
input example.
num_batches: Total number of batches in the dataset.
current_batch: 0D integer tensor containing index of current batch.
margin: Dual objective values for correct class will be forced to
`-margin`, thus disregarding large negative bounds when maximising.
objective_computation_config: Additional parameters for dual obj.
dataset_size: Size of dataset across all batches. By default this is
inferred from `num_batches * labels.shape[0]`, but can be set explictly
if not known at graph build time.
Returns:
2D tensor of shape (num_targets, batch_size) containing dual objective
values for each (class, example).
"""
# Dual variable generation across all batches.
if dataset_size is None:
batch_size = labels.shape[0]
dataset_size = num_batches * batch_size
else:
batch_size = tf.shape(labels)[0]
batch_lo = current_batch * batch_size
batch_hi = batch_lo + batch_size
def dual_var_getter(name, shape, dtype):
"""Creates a trainable tf.Variable for each dual variables."""
dual_var = self._get_dual_variable(name=name,
dtype=dtype,
shape=(shape[:1] + [dataset_size] +
shape[2:]))
# Return directly the tf.Variable if possible.
if dataset_size == batch_size:
return dual_var
# Select correct slice of dual variables for current batch.
sliced = dual_var[:, batch_lo:batch_hi]
sliced.set_shape(shape)
return sliced
(dual_obj, self._dual_var_lists, self._project_duals_op,
self._supporting_ops) = (
self._verification_strategy.create_duals_and_build_objective(
self._verifiable_layers,
labels,
dual_var_getter,
margin=margin,
target_strategy=self._target_strategy,
objective_computation_config=objective_computation_config))
return dual_obj
@property
def dual_var_lists(self):
"""TensorFlow variables for all dual variables."""
self._ensure_is_connected()
return self._dual_var_lists
@property
def project_duals_op(self):
"""TensorFlow operation to project all dual variables to their bounds."""
self._ensure_is_connected()
return self._project_duals_op
@property
def init_duals_op(self):
"""TensorFlow operation to initialize dual variables."""
return self.supporting_ops['init']
@property
def supporting_ops(self):
"""Additional TF ops (e.g. initialization) for the dual variables."""
self._ensure_is_connected()
return self._supporting_ops
def dual_variables_by_name(self, names):
"""Get dual variables by name."""
return _dual_variables_by_name(names, self.dual_var_lists)
def _dual_variables_by_name(names, dual_var_lists):
dual_vars = []
for dual_var_list in dual_var_lists:
for child_dual_var_lists in dual_var_list[:-1]:
dual_vars.extend(_dual_variables_by_name(names, child_dual_var_lists))
dual = dual_var_list[-1]
if dual is not None:
dual_vars.extend([dual[name] for name in names if name in dual])
return dual_vars
| [
"tensorflow.shape"
] | [((3684, 3700), 'tensorflow.shape', 'tf.shape', (['labels'], {}), '(labels)\n', (3692, 3700), True, 'import tensorflow as tf\n')] |
from django import forms
#class PredictForm(forms.Form):
# industry = forms.CharField(label='Your Name', max_length=100)
# sub_vertical =
# investment =
class UniqueForm(forms.Form):
your_num = forms.CharField(label='Your number')
| [
"django.forms.CharField"
] | [((210, 246), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Your number"""'}), "(label='Your number')\n", (225, 246), False, 'from django import forms\n')] |
#%%
from my_kmv import get_final_res_from_code
def get_tuple_list(codes, start_date, end_date):
tuple_list = []
mode = "v2"
for code in codes:
try:
_, _, df = get_final_res_from_code(code, start_date, end_date, mode)
tuple_list += list(zip(df["DD"], df["roe_dt"]))
except:
print(df["DD"], df["roe_dt"])
print(code, "have failed")
return tuple_list
#%%
code_dict = {
"南京银行": "601009.SH",
"北京银行": "601169.SH",
"宁波银行": "002142.SZ"
}
tuple_list = get_tuple_list(code_dict.values(), "20100101", "20201231")
# %%
import matplotlib.pyplot as plt
figure = plt.figure()
plt.hist([x[0] for x in tuple_list], bins = 20)
plt.savefig("figure_chang/chang_DD_hist.png")
plt.close()
# %%
figure = plt.figure()
plt.hist([x[1] for x in tuple_list], bins = 20)
plt.savefig("figure_chang/chang_roe_hist.png")
plt.close()
# dd -> range
array_x = []
array_y = []
import numpy as np
for x in tuple_list:
array_x.append(x[0])
array_y.append(x[1])
#print(np.mean(array_x))
#print(np.std(array_x,ddof=1))
xiaxian1 = np.mean(array_x) - 2*np.std(array_x,ddof=1)
shangxian1 = np.mean(array_x) + 2*np.std(array_x,ddof=1)
per20 = np.nanpercentile(array_y,20)#
jiange1 = (shangxian1 - xiaxian1)/20
# %%
range_list = [(xiaxian1 + i * jiange1, xiaxian1 + (i+1) * jiange1) for i in range(20)]
count_dict = {i: [0, 0] for i in range(-1, 21)}
for x in tuple_list:
if x[0] <= range_list[0][0]:
count_dict[-1][0] += 1
if x[1] < per20:
count_dict[-1][1] += 1
continue
if x[0] > range_list[-1][1]:
count_dict[20][0] += 1
if x[1] < per20:
count_dict[20][1] += 1
continue
for j, range_ in enumerate(range_list):
if x[0] > range_[0] and x[0] <= range_[1]:
count_dict[j][0] += 1
if x[1] < per20:
count_dict[j][1] += 1
break
count_dict
# %%
for key, value in count_dict.items():
if key == -1:
print("(-inf,%s)"%xiaxian1, value[1] / value[0] if value[0] != 0 else 0)
elif key == 20:
print("(%s,inf)"%shangxian1, value[1] / value[0] if value[0] != 0 else 0)
else:
print(range_list[key], value[1] / value[0] if value[0] != 0 else 0)
# %%
import numpy as np
midx = [(range_list[i][0] + range_list[i][1]) / 2 for i in range(20)]
midy = [
count_dict[i][1] / count_dict[i][0] if count_dict[i][0] != 0 else 0
for i in range(20)
]
import random
#a = np.polyfit(samplex, sampley, 3)
#b = np.poly1d(a)
def b(dd,midx,midy):
epsilon = random.random() * 0.05
if dd >= midx[0] and dd < midx[19]:
        for i in range(0,19):
if dd >= midx[i] and dd < midx[i+1]:
y = midy[i] + (dd - midx[i]) * (midy[i+1] - midy[i]) / (midx[i+1] - midx[i])
return y + epsilon if y == 0.0 else y
else: return epsilon
import pickle as pk
# %%
pk.dump(midx, open("data/midx.pk", "wb"))
pk.dump(midy, open("data/midy.pk", "wb"))
pk.dump(b, open("data/edf_fun.pk", "wb"))
# %%
#import matplotlib.pyplot as plt
#plot_x = [(-0.5 + i / 1000) for i in range(1, 1000)]
#plot_y = [b(x) for x in plot_x]
#plt.plot(plot_x, plot_y)
#plt.savefig("figure_chang/zhexian.png")
# %%
| [
"numpy.mean",
"matplotlib.pyplot.hist",
"numpy.nanpercentile",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"my_kmv.get_final_res_from_code",
"numpy.std",
"random.random"
] | [((646, 658), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (656, 658), True, 'import matplotlib.pyplot as plt\n'), ((659, 704), 'matplotlib.pyplot.hist', 'plt.hist', (['[x[0] for x in tuple_list]'], {'bins': '(20)'}), '([x[0] for x in tuple_list], bins=20)\n', (667, 704), True, 'import matplotlib.pyplot as plt\n'), ((707, 752), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figure_chang/chang_DD_hist.png"""'], {}), "('figure_chang/chang_DD_hist.png')\n", (718, 752), True, 'import matplotlib.pyplot as plt\n'), ((753, 764), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (762, 764), True, 'import matplotlib.pyplot as plt\n'), ((780, 792), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (790, 792), True, 'import matplotlib.pyplot as plt\n'), ((793, 838), 'matplotlib.pyplot.hist', 'plt.hist', (['[x[1] for x in tuple_list]'], {'bins': '(20)'}), '([x[1] for x in tuple_list], bins=20)\n', (801, 838), True, 'import matplotlib.pyplot as plt\n'), ((841, 887), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figure_chang/chang_roe_hist.png"""'], {}), "('figure_chang/chang_roe_hist.png')\n", (852, 887), True, 'import matplotlib.pyplot as plt\n'), ((888, 899), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (897, 899), True, 'import matplotlib.pyplot as plt\n'), ((1207, 1236), 'numpy.nanpercentile', 'np.nanpercentile', (['array_y', '(20)'], {}), '(array_y, 20)\n', (1223, 1236), True, 'import numpy as np\n'), ((1098, 1114), 'numpy.mean', 'np.mean', (['array_x'], {}), '(array_x)\n', (1105, 1114), True, 'import numpy as np\n'), ((1155, 1171), 'numpy.mean', 'np.mean', (['array_x'], {}), '(array_x)\n', (1162, 1171), True, 'import numpy as np\n'), ((1119, 1142), 'numpy.std', 'np.std', (['array_x'], {'ddof': '(1)'}), '(array_x, ddof=1)\n', (1125, 1142), True, 'import numpy as np\n'), ((1176, 1199), 'numpy.std', 'np.std', (['array_x'], {'ddof': '(1)'}), '(array_x, ddof=1)\n', (1182, 1199), True, 'import numpy as np\n'), ((2585, 2600), 'random.random', 'random.random', ([], {}), '()\n', (2598, 2600), False, 'import random\n'), ((193, 250), 'my_kmv.get_final_res_from_code', 'get_final_res_from_code', (['code', 'start_date', 'end_date', 'mode'], {}), '(code, start_date, end_date, mode)\n', (216, 250), False, 'from my_kmv import get_final_res_from_code\n')] |
from PIL import Image, ImageDraw, ImageFont
def bgsreportimg():
img = Image.new('RGB', (200, 200))
d1 = ImageDraw.Draw(img)
d1.text((65, 10), "Sample Text", fill=(255, 0, 0))
img.save('temp/bgsreport.png')
bgsreportimg()
| [
"PIL.Image.new",
"PIL.ImageDraw.Draw"
] | [((76, 104), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(200, 200)'], {}), "('RGB', (200, 200))\n", (85, 104), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((115, 134), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (129, 134), False, 'from PIL import Image, ImageDraw, ImageFont\n')] |
#!/usr/bin/env python3
#p3_190212_2243.py
# Telegram bot calculator
# Bot model without webhooks
import requests
import re
REGEXP = '\d+\s*[\+\-\*\/]{1}\s*\d+'
class Calculator:
def __init__(self):
token = '690888180:<KEY>'
self.url_bot = 'https://api.telegram.org/bot' + token + '/'
self.regex = re.compile(REGEXP)
def get_updates(self):
url = self.get_url('getUpdates', None)
resp = requests.get(url)
return resp.json()
def get_message(self):
data = self.get_updates()
last_message = data['result'][-1]
chat_id = last_message['message']['from']['id']
message_text = last_message['message']['text']
message_info = {'room_id': chat_id, 'msg_text': message_text}
return message_info
def send_message(self, cid, text):
message_args = {'room_id': cid, 'msg_text': text}
url = self.get_url('sendMessage', message_args)
check_code = requests.get(url)
def get_url(self, request, params):
url = self.url_bot + request
if params:
cid = params['room_id']
            text = params['msg_text']
url += '?chat_id={}&text={}'.format(cid, text)
return url
def calculate(self, message):
exp1 = list()
exp1 = re.findall(self.regex, message)
if exp1:
print(eval(exp1[0]))
def main():
win1 = Calculator()
print(win1.get_message())
if __name__ == '__main__':
main()
| [
"re.findall",
"requests.get",
"re.compile"
] | [((327, 345), 're.compile', 're.compile', (['REGEXP'], {}), '(REGEXP)\n', (337, 345), False, 'import re\n'), ((435, 452), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (447, 452), False, 'import requests\n'), ((966, 983), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (978, 983), False, 'import requests\n'), ((1299, 1330), 're.findall', 're.findall', (['self.regex', 'message'], {}), '(self.regex, message)\n', (1309, 1330), False, 'import re\n')] |