# Copyright (c) Gradient Institute. All rights reserved.
# Licensed under the Apache 2.0 License.
"""Example of how to use the causal inspection tools with simple models."""
import logging

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from cinspect.evaluators import (
    PartialDependanceEvaluator,
    PermutationImportanceEvaluator,
)
from cinspect.model_evaluation import bootcross_model
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import GridSearchCV, GroupKFold
from sklearn.utils import check_random_state

from simulations.datagen import DGPGraph

# Logging
LOG = logging.getLogger(__name__)

# Log INFO to STDOUT
logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler()])


def data_generation(alpha=0.3, n_x=30, support_size=5, random_state=None):
    """Specify the data generation process.

    This is just a simple "triangle" model with linear relationships.

    X: confounding factors
    T: treatment
    Y: outcome

    Causal relationships are X->T, X->Y, T->Y.

    This is for a *continuous* treatment variable.
    """
    rng = check_random_state(random_state)
    coefs_T = np.zeros(n_x)
    coefs_T[0:support_size] = rng.normal(1, 1, size=support_size)
    coefs_Y = np.zeros(n_x)
    coefs_Y[0:support_size] = rng.uniform(0, 1, size=support_size)

    def fX(n):
        return rng.normal(0, 1, size=(n, n_x))

    def fT(X, n):
        return X @ coefs_T + rng.uniform(-1, 1, size=n)

    def fY(X, T, n):
        return alpha * T + X @ coefs_Y + rng.uniform(-1, 1, size=n)

    dgp = DGPGraph()
    dgp.add_node("X", fX)
    dgp.add_node("T", fT, parents=["X"])
    dgp.add_node("Y", fY, parents=["X", "T"])
    return dgp
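

# Note: because fY above is linear in T with coefficient `alpha`, the true
# average causal effect of T on Y in this simulation is exactly `alpha`
# (0.3 by default) at every treatment level, so the partial dependence
# estimated below should be close to a line with that slope.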

def main():
    """Run the simulation."""
    dgp = data_generation()

    # Show the data generation graph
    dgp.draw_graph()
    plt.figure()

    # Generate data for the scenario
    data = dgp.sample(1000)

    # Generate interventional data for plotting the average causal effect for
    # each intervention level.
    s = 100
    T_min, T_max = data["T"].min(), data["T"].max()
    T_levels = np.linspace(T_min, T_max, 20)
    te = [dgp.sample(n=s, interventions={"T": t})["Y"] for t in T_levels]
    ate = np.mean(te, axis=1)
    # standard error of the mean outcome at each treatment level
    ste_ate = np.std(te, axis=1, ddof=1) / np.sqrt(s)
    # plot the "causal effect" for each treatment level
    plt.fill_between(T_levels, ate + ste_ate, ate - ste_ate, alpha=0.5)
    plt.plot(T_levels, ate, "r")
    plt.title("Average treatment effect from the simulation.")
    plt.xlabel("T")
    plt.ylabel("Y")

    Y = data.pop("Y")
    dX = data.pop("X")
    data.update({f"X{i}": x for i, x in enumerate(dX.T)})
    X = pd.DataFrame(data)

    # Model selection
    # GroupKFold is used to make sure grid search does not use the same samples
    # from the bootstrapping procedure later in the training and testing folds
    model = GridSearchCV(
        GradientBoostingRegressor(),
        param_grid={"max_depth": [1, 2]},
        cv=GroupKFold(n_splits=5),
    )
    # Causal estimation
    pdeval = PartialDependanceEvaluator(feature_grids={"T": "auto"})
    pieval = PermutationImportanceEvaluator(n_repeats=5)
    bootcross_model(
        model, X, Y, [pdeval, pieval], replications=30, use_group_cv=True
    )  # use_group_cv=True makes sure the GroupKFold above is respected

    pdeval.get_results(mode="interval")
    pdeval.get_results(mode="derivative")
    pdeval.get_results(mode="multiple-pd-lines")
    pieval.get_results(ntop=5)

    plt.show()


if __name__ == "__main__":
    main()
| [
"logging.getLogger",
"logging.StreamHandler",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"simulations.datagen.DGPGraph",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"sklearn.model_selection.GroupKFold",
"cinspect.evaluators.PermutationImportanceEvaluator",
"pandas.DataFrame",
"sklearn.ensemble.GradientBoostingRegressor",
"sklearn.utils.check_random_state",
"numpy.std",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"cinspect.evaluators.PartialDependanceEvaluator",
"cinspect.model_evaluation.bootcross_model",
"numpy.zeros",
"matplotlib.pyplot.figure"
] | [((640, 667), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (657, 667), False, 'import logging\n'), ((1136, 1168), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (1154, 1168), False, 'from sklearn.utils import check_random_state\n'), ((1183, 1196), 'numpy.zeros', 'np.zeros', (['n_x'], {}), '(n_x)\n', (1191, 1196), True, 'import numpy as np\n'), ((1278, 1291), 'numpy.zeros', 'np.zeros', (['n_x'], {}), '(n_x)\n', (1286, 1291), True, 'import numpy as np\n'), ((1598, 1608), 'simulations.datagen.DGPGraph', 'DGPGraph', ([], {}), '()\n', (1606, 1608), False, 'from simulations.datagen import DGPGraph\n'), ((1872, 1884), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1882, 1884), True, 'import matplotlib.pyplot as plt\n'), ((2140, 2169), 'numpy.linspace', 'np.linspace', (['T_min', 'T_max', '(20)'], {}), '(T_min, T_max, 20)\n', (2151, 2169), True, 'import numpy as np\n'), ((2254, 2273), 'numpy.mean', 'np.mean', (['te'], {'axis': '(1)'}), '(te, axis=1)\n', (2261, 2273), True, 'import numpy as np\n'), ((2381, 2448), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['T_levels', '(ate + ste_ate)', '(ate - ste_ate)'], {'alpha': '(0.5)'}), '(T_levels, ate + ste_ate, ate - ste_ate, alpha=0.5)\n', (2397, 2448), True, 'import matplotlib.pyplot as plt\n'), ((2453, 2481), 'matplotlib.pyplot.plot', 'plt.plot', (['T_levels', 'ate', '"""r"""'], {}), "(T_levels, ate, 'r')\n", (2461, 2481), True, 'import matplotlib.pyplot as plt\n'), ((2486, 2544), 'matplotlib.pyplot.title', 'plt.title', (['"""Average treatment effect from the simulation."""'], {}), "('Average treatment effect from the simulation.')\n", (2495, 2544), True, 'import matplotlib.pyplot as plt\n'), ((2549, 2564), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""T"""'], {}), "('T')\n", (2559, 2564), True, 'import matplotlib.pyplot as plt\n'), ((2569, 2584), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (2579, 2584), True, 'import matplotlib.pyplot as plt\n'), ((2697, 2715), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (2709, 2715), True, 'import pandas as pd\n'), ((3082, 3137), 'cinspect.evaluators.PartialDependanceEvaluator', 'PartialDependanceEvaluator', ([], {'feature_grids': "{'T': 'auto'}"}), "(feature_grids={'T': 'auto'})\n", (3108, 3137), False, 'from cinspect.evaluators import PartialDependanceEvaluator, PermutationImportanceEvaluator\n'), ((3151, 3194), 'cinspect.evaluators.PermutationImportanceEvaluator', 'PermutationImportanceEvaluator', ([], {'n_repeats': '(5)'}), '(n_repeats=5)\n', (3181, 3194), False, 'from cinspect.evaluators import PartialDependanceEvaluator, PermutationImportanceEvaluator\n'), ((3199, 3285), 'cinspect.model_evaluation.bootcross_model', 'bootcross_model', (['model', 'X', 'Y', '[pdeval, pieval]'], {'replications': '(30)', 'use_group_cv': '(True)'}), '(model, X, Y, [pdeval, pieval], replications=30,\n use_group_cv=True)\n', (3214, 3285), False, 'from cinspect.model_evaluation import bootcross_model\n'), ((3503, 3513), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3511, 3513), True, 'import matplotlib.pyplot as plt\n'), ((2288, 2306), 'numpy.std', 'np.std', (['te'], {'ddof': '(1)'}), '(te, ddof=1)\n', (2294, 2306), True, 'import numpy as np\n'), ((2309, 2319), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (2316, 2319), True, 'import numpy as np\n'), ((2932, 2959), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {}), '()\n', 
(2957, 2959), False, 'from sklearn.ensemble import GradientBoostingRegressor\n'), ((740, 763), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (761, 763), False, 'import logging\n'), ((3014, 3036), 'sklearn.model_selection.GroupKFold', 'GroupKFold', ([], {'n_splits': '(5)'}), '(n_splits=5)\n', (3024, 3036), False, 'from sklearn.model_selection import GridSearchCV, GroupKFold\n')] |
import glfw
import numpy as np


def glm_normalize(v):
    norm = np.linalg.norm(v)
    if norm == 0:
        return v
    return v / norm


def glm_perspective(fovy, aspect, zNear, zFar):
    assert abs(aspect) > 0
    tanHalfFovy = np.tan(fovy / 2.0)
    Result = np.zeros((4, 4))
    Result[0][0] = 1. / (aspect * tanHalfFovy)
    Result[1][1] = 1. / (tanHalfFovy)
    Result[2][3] = -1.
    Result[2][2] = -float(zFar + zNear) / (zFar - zNear)
    Result[3][2] = -(2. * zFar * zNear) / (zFar - zNear)
    return Result


def glm_ortho(left, right, bottom, top, zNear, zFar):
    Result = np.eye(4)
    Result[0][0] = 2. / (right - left)
    Result[1][1] = 2. / (top - bottom)
    Result[3][0] = -float(right + left) / (right - left)
    Result[3][1] = -float(top + bottom) / (top - bottom)
    Result[2][2] = -2. / (zFar - zNear)
    Result[3][2] = -float(zFar + zNear) / (zFar - zNear)
    return Result


def glm_lookAt(eye, center, up):
    eye = np.asarray(eye)
    center = np.asarray(center)
    up = np.asarray(up)
    f = glm_normalize(center - eye)
    s = glm_normalize(np.cross(f, up))
    u = glm_normalize(np.cross(s, f))
    Result = np.eye(4)
    Result[:3, 0] = s
    Result[:3, 1] = u
    Result[:3, 2] = -f
    Result[3][0] = -np.dot(s, eye)
    Result[3][1] = -np.dot(u, eye)
    Result[3][2] = np.dot(f, eye)
    return Result
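

# Convention note: these glm_* helpers build transposed (row-vector) matrices
# -- translations live in row 3 rather than column 3 -- so a point transforms
# as v_row @ Model @ View @ Projection, which matches the MVP product computed
# in Controls.update() below.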
class Controls:
    def __init__(self, window):
        self.window = window
        self.ViewMatrix = np.eye(4)
        self.ProjectionMatrix = np.eye(4)
        # Initial position : on +Z
        self.position = np.array([0., 0., 5.])
        # Initial horizontal angle : toward -Z
        self.horizontalAngle = np.pi
        # Initial vertical angle : none
        self.verticalAngle = 0.0
        # Initial Field of View
        self.initialFoV = 45.0
        self.speed = 3.0  # 3 units / second
        self.mouseSpeed = 0.0005
        self.lastTime = None

    def update(self):
        # glfw.get_time is read once here the first time this function is called
        if self.lastTime is None:
            self.lastTime = glfw.get_time()

        # Compute time difference between current and last frame
        currentTime = glfw.get_time()
        deltaTime = float(currentTime - self.lastTime)

        # Get mouse position
        xpos, ypos = glfw.get_cursor_pos(self.window)

        # Reset mouse position for next frame
        glfw.set_cursor_pos(self.window, 1024 / 2, 768 / 2)

        # Compute new orientation
        self.horizontalAngle += self.mouseSpeed * float(1024 / 2 - xpos)
        self.verticalAngle += self.mouseSpeed * float(768 / 2 - ypos)

        # Direction : Spherical coordinates to Cartesian coordinates conversion
        direction = np.array([
            np.cos(self.verticalAngle) * np.sin(self.horizontalAngle),
            np.sin(self.verticalAngle),
            np.cos(self.verticalAngle) * np.cos(self.horizontalAngle)
        ])

        # Right vector
        right = np.array([
            np.sin(self.horizontalAngle - np.pi / 2.0),
            0,
            np.cos(self.horizontalAngle - np.pi / 2.0)
        ])

        # Up vector
        up = np.cross(right, direction)

        # Move forward
        if glfw.get_key(self.window, glfw.KEY_UP) == glfw.PRESS:
            self.position += direction * deltaTime * self.speed
        # Move backward
        if glfw.get_key(self.window, glfw.KEY_DOWN) == glfw.PRESS:
            self.position -= direction * deltaTime * self.speed
        # Strafe right
        if glfw.get_key(self.window, glfw.KEY_RIGHT) == glfw.PRESS:
            self.position += right * deltaTime * self.speed
        # Strafe left
        if glfw.get_key(self.window, glfw.KEY_LEFT) == glfw.PRESS:
            self.position -= right * deltaTime * self.speed

        FoV = self.initialFoV  # - 5 * glfwGetMouseWheel();
        # Now GLFW 3 requires setting up a callback for this.
        # It's a bit too complicated for this beginner's tutorial, so it's disabled instead.

        # Projection matrix : 45 deg Field of View, 4:3 ratio, display range : 0.1 unit <-> 100 units
        self.ProjectionMatrix = glm_perspective(FoV * np.pi / 180., 4.0 / 3.0, 0.1, 100.0)
        # Camera matrix
        self.ViewMatrix = glm_lookAt(
            self.position,              # Camera is here
            self.position + direction,  # and looks here : at the same position, plus "direction"
            up                          # Head is up (set to 0,-1,0 to look upside-down)
        )

        # For the next frame, the "last time" will be "now"
        self.lastTime = currentTime

        self.ModelMatrix = np.eye(4)
        self.MVP = np.array(np.dot(np.dot(self.ModelMatrix, self.ViewMatrix), self.ProjectionMatrix), dtype=np.float32)
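

# Minimal usage sketch (illustrative only; GLFW window setup is not part of
# this module):
#
#   glfw.init()
#   window = glfw.create_window(1024, 768, "demo", None, None)
#   glfw.make_context_current(window)
#   controls = Controls(window)
#   while not glfw.window_should_close(window):
#       controls.update()   # refreshes ViewMatrix, ProjectionMatrix and MVP
#       # ... draw the scene using controls.MVP ...
#       glfw.swap_buffers(window)
#       glfw.poll_events()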
| [
"numpy.eye",
"numpy.tan",
"numpy.cross",
"glfw.set_cursor_pos",
"numpy.asarray",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"glfw.get_key",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"glfw.get_time",
"glfw.get_cursor_pos"
] | [((65, 82), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (79, 82), True, 'import numpy as np\n'), ((218, 236), 'numpy.tan', 'np.tan', (['(fovy / 2.0)'], {}), '(fovy / 2.0)\n', (224, 236), True, 'import numpy as np\n'), ((250, 266), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (258, 266), True, 'import numpy as np\n'), ((574, 583), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (580, 583), True, 'import numpy as np\n'), ((936, 951), 'numpy.asarray', 'np.asarray', (['eye'], {}), '(eye)\n', (946, 951), True, 'import numpy as np\n'), ((965, 983), 'numpy.asarray', 'np.asarray', (['center'], {}), '(center)\n', (975, 983), True, 'import numpy as np\n'), ((993, 1007), 'numpy.asarray', 'np.asarray', (['up'], {}), '(up)\n', (1003, 1007), True, 'import numpy as np\n'), ((1136, 1145), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1142, 1145), True, 'import numpy as np\n'), ((1297, 1311), 'numpy.dot', 'np.dot', (['f', 'eye'], {}), '(f, eye)\n', (1303, 1311), True, 'import numpy as np\n'), ((1067, 1082), 'numpy.cross', 'np.cross', (['f', 'up'], {}), '(f, up)\n', (1075, 1082), True, 'import numpy as np\n'), ((1106, 1120), 'numpy.cross', 'np.cross', (['s', 'f'], {}), '(s, f)\n', (1114, 1120), True, 'import numpy as np\n'), ((1229, 1243), 'numpy.dot', 'np.dot', (['s', 'eye'], {}), '(s, eye)\n', (1235, 1243), True, 'import numpy as np\n'), ((1263, 1277), 'numpy.dot', 'np.dot', (['u', 'eye'], {}), '(u, eye)\n', (1269, 1277), True, 'import numpy as np\n'), ((1441, 1450), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1447, 1450), True, 'import numpy as np\n'), ((1483, 1492), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1489, 1492), True, 'import numpy as np\n'), ((1553, 1578), 'numpy.array', 'np.array', (['[0.0, 0.0, 5.0]'], {}), '([0.0, 0.0, 5.0])\n', (1561, 1578), True, 'import numpy as np\n'), ((2178, 2193), 'glfw.get_time', 'glfw.get_time', ([], {}), '()\n', (2191, 2193), False, 'import glfw\n'), ((2300, 2332), 'glfw.get_cursor_pos', 'glfw.get_cursor_pos', (['self.window'], {}), '(self.window)\n', (2319, 2332), False, 'import glfw\n'), ((2388, 2439), 'glfw.set_cursor_pos', 'glfw.set_cursor_pos', (['self.window', '(1024 / 2)', '(768 / 2)'], {}), '(self.window, 1024 / 2, 768 / 2)\n', (2407, 2439), False, 'import glfw\n'), ((3140, 3166), 'numpy.cross', 'np.cross', (['right', 'direction'], {}), '(right, direction)\n', (3148, 3166), True, 'import numpy as np\n'), ((4635, 4644), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (4641, 4644), True, 'import numpy as np\n'), ((2074, 2089), 'glfw.get_time', 'glfw.get_time', ([], {}), '()\n', (2087, 2089), False, 'import glfw\n'), ((3204, 3242), 'glfw.get_key', 'glfw.get_key', (['self.window', 'glfw.KEY_UP'], {}), '(self.window, glfw.KEY_UP)\n', (3216, 3242), False, 'import glfw\n'), ((3359, 3399), 'glfw.get_key', 'glfw.get_key', (['self.window', 'glfw.KEY_DOWN'], {}), '(self.window, glfw.KEY_DOWN)\n', (3371, 3399), False, 'import glfw\n'), ((3515, 3556), 'glfw.get_key', 'glfw.get_key', (['self.window', 'glfw.KEY_RIGHT'], {}), '(self.window, glfw.KEY_RIGHT)\n', (3527, 3556), False, 'import glfw\n'), ((3667, 3707), 'glfw.get_key', 'glfw.get_key', (['self.window', 'glfw.KEY_LEFT'], {}), '(self.window, glfw.KEY_LEFT)\n', (3679, 3707), False, 'import glfw\n'), ((2812, 2838), 'numpy.sin', 'np.sin', (['self.verticalAngle'], {}), '(self.verticalAngle)\n', (2818, 2838), True, 'import numpy as np\n'), ((2984, 3026), 'numpy.sin', 'np.sin', (['(self.horizontalAngle - np.pi / 2.0)'], {}), '(self.horizontalAngle - np.pi / 2.0)\n', (2990, 
3026), True, 'import numpy as np\n'), ((3054, 3096), 'numpy.cos', 'np.cos', (['(self.horizontalAngle - np.pi / 2.0)'], {}), '(self.horizontalAngle - np.pi / 2.0)\n', (3060, 3096), True, 'import numpy as np\n'), ((4682, 4723), 'numpy.dot', 'np.dot', (['self.ModelMatrix', 'self.ViewMatrix'], {}), '(self.ModelMatrix, self.ViewMatrix)\n', (4688, 4723), True, 'import numpy as np\n'), ((2740, 2766), 'numpy.cos', 'np.cos', (['self.verticalAngle'], {}), '(self.verticalAngle)\n', (2746, 2766), True, 'import numpy as np\n'), ((2769, 2797), 'numpy.sin', 'np.sin', (['self.horizontalAngle'], {}), '(self.horizontalAngle)\n', (2775, 2797), True, 'import numpy as np\n'), ((2852, 2878), 'numpy.cos', 'np.cos', (['self.verticalAngle'], {}), '(self.verticalAngle)\n', (2858, 2878), True, 'import numpy as np\n'), ((2881, 2909), 'numpy.cos', 'np.cos', (['self.horizontalAngle'], {}), '(self.horizontalAngle)\n', (2887, 2909), True, 'import numpy as np\n')] |
# Author: <NAME>
# Licensed under BSD 3-Clause License, see included LICENSE file
"""Parse the cookiebot cc.js file.

Usage: parse_cb_js.py <file>"""

import re
import sys
from ast import literal_eval
from enum import Enum

from docopt import docopt

## Some observations:
# The Javascript file contains some data that isn't shown on the actual webpage, namely in the last 3 entries of each array.
# Furthermore, it appears as if not all entries in the Cookiebot table are actually true Cookies.
# The following types are listed:
# > HTTP: ID 1, most likely actual Cookies.
# > HTML: ID 2, no idea what this type is supposed to be.
# > Pixel: ID 5, Tracking pixels embedded on the website
# Additional information includes a regular expression (possibly to identify variations of the same cookie)
# and a final URL as the last entry, possibly the true destination where the data is sent to?


class CookieCategories(Enum):
    NECESSARY = 0
    PREFERENCE = 1
    STATISTICS = 2
    ADVERTISING = 3
    UNCLASSIFIED = 4


# Raw strings so that "\." stays a literal-dot regex escape rather than an
# invalid string escape.
category_patterns = {CookieCategories.NECESSARY: r"CookieConsentDialog\.cookieTableNecessary = (.*);",
                     CookieCategories.PREFERENCE: r"CookieConsentDialog\.cookieTablePreference = (.*);",
                     CookieCategories.STATISTICS: r"CookieConsentDialog\.cookieTableStatistics = (.*);",
                     CookieCategories.ADVERTISING: r"CookieConsentDialog\.cookieTableAdvertising = (.*);",
                     CookieCategories.UNCLASSIFIED: r"CookieConsentDialog\.cookieTableUnclassified = (.*);"}
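
# Illustrative (hypothetical) shape of a line the patterns above are meant to
# match -- the actual field values vary per site:
#   CookieConsentDialog.cookieTableNecessary = [["CookieConsent", "example.com",
#       "Stores consent state", "1 year", "HTTP", 1, "CookieConsent.*", ""]];
# literal_eval() below then parses the captured array into a list of lists.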

if __name__ == "__main__":
    args = docopt(__doc__)

    with open(args["<file>"], 'r') as fd:
        cb_js = fd.read()

    for cat in CookieCategories:
        matchobj = re.search(category_patterns[cat], cb_js)
        print("==============================\n"
              f"Category: {cat}\n"
              "==============================")
        if not matchobj:
            print("Did not find match", file=sys.stderr)
            sys.exit(1)
        cookies = literal_eval(matchobj.group(1))
        for c in cookies:
            print(f"cookie name: {c[0]}")
            print(f"source: {c[1]}")
            print(f"purpose: {c[2]}")
            print(f"expires after: {c[3]}")
            print(f"type name: {c[4]}")
            print(f"type id: {c[5]}")
            print(f"regex match: {c[6]}")
            print(f"Hyperlink URL: {c[7]}")
            print("--------------")
| [
"docopt.docopt",
"re.search"
] | [((1590, 1605), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (1596, 1605), False, 'from docopt import docopt\n'), ((1730, 1770), 're.search', 're.search', (['category_patterns[cat]', 'cb_js'], {}), '(category_patterns[cat], cb_js)\n', (1739, 1770), False, 'import re\n')] |
# %%
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Sequential, losses, optimizers, datasets

# %%
x = tf.constant([2., 1., 0.1])
layer = layers.Softmax(axis=-1)
layer(x)


# %%
def preprocess(x, y):
    # flatten each 28x28 image into a 784-dim vector
    x = tf.reshape(x, [-1])
    return x, y


# x: [60k, 28, 28],
# y: [60k]
(x, y), (x_test, y_test) = datasets.mnist.load_data()
# x: [0~255] => [0~1.]
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.
y = tf.convert_to_tensor(y, dtype=tf.int32)
# x: [0~255] => [0~1.]
x_test = tf.convert_to_tensor(x_test, dtype=tf.float32) / 255.
y_test = tf.convert_to_tensor(y_test, dtype=tf.int32)

train_db = tf.data.Dataset.from_tensor_slices((x, y))
train_db = train_db.shuffle(1000).map(preprocess).batch(128)
val_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))
val_db = val_db.shuffle(1000).map(preprocess).batch(128)

x, y = next(iter(train_db))
print(x.shape, y.shape)
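# With the pipeline above, x holds a batch of flattened images and y the
# integer labels, so this should print shapes along the lines of (128, 784) (128,).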

# %%
from tensorflow.keras import layers, Sequential

network = Sequential([
    layers.Dense(3, activation=None),
    layers.ReLU(),
    layers.Dense(2, activation=None),
    layers.ReLU()
])
x = tf.random.normal([4, 3])
network(x)

# %%
layers_num = 2
network = Sequential([])
for _ in range(layers_num):
    network.add(layers.Dense(3))
    network.add(layers.ReLU())
network.build(input_shape=(None, 4))
network.summary()

# %%
for p in network.trainable_variables:
    print(p.name, p.shape)

# %%
# Create a 5-layer fully connected network
network = Sequential([layers.Dense(256, activation='relu'),
                    layers.Dense(128, activation='relu'),
                    layers.Dense(64, activation='relu'),
                    layers.Dense(32, activation='relu'),
                    layers.Dense(10)])
network.build(input_shape=(4, 28 * 28))
network.summary()

# %%
# Import the optimizer and loss function modules
from tensorflow.keras import optimizers, losses

# Use the Adam optimizer with learning rate 0.01 and a cross-entropy loss
# computed from logits. The labels y are integer class ids (not one-hot),
# so the *sparse* variant of the loss is needed here.
network.compile(optimizer=optimizers.Adam(lr=0.01),
                loss=losses.SparseCategoricalCrossentropy(from_logits=True),
                metrics=['accuracy']  # track accuracy as the metric
                )

# %%
# Train on train_db and validate on val_db; train for 5 epochs, validating every 2 epochs
history = network.fit(train_db, epochs=5, validation_data=val_db, validation_freq=2)

# %%
history.history  # inspect the training record
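# history.history is a dict mapping metric names to per-epoch lists (e.g.
# 'loss' and 'accuracy' with 5 entries here, and 'val_*' entries only for the
# epochs where validation actually ran).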

# %%
# Save the model parameters to a file
network.save_weights('weights.ckpt')
print('saved weights.')
del network  # delete the network object

# Re-create the same network structure
network = Sequential([layers.Dense(256, activation='relu'),
                    layers.Dense(128, activation='relu'),
                    layers.Dense(64, activation='relu'),
                    layers.Dense(32, activation='relu'),
                    layers.Dense(10)])
network.compile(optimizer=optimizers.Adam(lr=0.01),
                loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
                metrics=['accuracy']
                )
# Read the saved parameters back into the current network
network.load_weights('weights.ckpt')
print('loaded weights!')

# %%
# Create a global average pooling layer
global_average_layer = layers.GlobalAveragePooling2D()
# Feed the output of a previous layer into this layer and test its output
x = tf.random.normal([4, 7, 7, 2048])
out = global_average_layer(x)  # pooling collapses the spatial dimensions
print(out.shape)

# %%
# Create a fully connected layer
fc = layers.Dense(100)
# Feed the output of a previous layer into this layer and test its output
x = tf.random.normal([4, 2048])
out = fc(x)
print(out.shape)
# %%
| [
"tensorflow.random.normal",
"tensorflow.keras.losses.CategoricalCrossentropy",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.keras.Sequential",
"tensorflow.keras.layers.ReLU",
"tensorflow.keras.optimizers.Adam",
"tensorflow.losses.CategoricalCrossentropy",
"tensorflow.keras.layers.Dense",
"tensorflow.constant",
"tensorflow.reshape",
"tensorflow.convert_to_tensor",
"tensorflow.keras.layers.Softmax",
"tensorflow.keras.layers.GlobalAveragePooling2D"
] | [((146, 174), 'tensorflow.constant', 'tf.constant', (['[2.0, 1.0, 0.1]'], {}), '([2.0, 1.0, 0.1])\n', (157, 174), True, 'import tensorflow as tf\n'), ((181, 204), 'tensorflow.keras.layers.Softmax', 'layers.Softmax', ([], {'axis': '(-1)'}), '(axis=-1)\n', (195, 204), False, 'from tensorflow.keras import layers, Sequential\n'), ((347, 373), 'tensorflow.keras.datasets.mnist.load_data', 'datasets.mnist.load_data', ([], {}), '()\n', (371, 373), False, 'from tensorflow.keras import layers, Sequential, losses, optimizers, datasets\n'), ((454, 493), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['y'], {'dtype': 'tf.int32'}), '(y, dtype=tf.int32)\n', (474, 493), True, 'import tensorflow as tf\n'), ((590, 634), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['y_test'], {'dtype': 'tf.int32'}), '(y_test, dtype=tf.int32)\n', (610, 634), True, 'import tensorflow as tf\n'), ((647, 689), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x, y)'], {}), '((x, y))\n', (681, 689), True, 'import tensorflow as tf\n'), ((761, 813), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_test, y_test)'], {}), '((x_test, y_test))\n', (795, 813), True, 'import tensorflow as tf\n'), ((1122, 1146), 'tensorflow.random.normal', 'tf.random.normal', (['[4, 3]'], {}), '([4, 3])\n', (1138, 1146), True, 'import tensorflow as tf\n'), ((1189, 1203), 'tensorflow.keras.Sequential', 'Sequential', (['[]'], {}), '([])\n', (1199, 1203), False, 'from tensorflow.keras import layers, Sequential\n'), ((2946, 2977), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'layers.GlobalAveragePooling2D', ([], {}), '()\n', (2975, 2977), False, 'from tensorflow.keras import layers, Sequential\n'), ((3006, 3039), 'tensorflow.random.normal', 'tf.random.normal', (['[4, 7, 7, 2048]'], {}), '([4, 7, 7, 2048])\n', (3022, 3039), True, 'import tensorflow as tf\n'), ((3116, 3133), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(100)'], {}), '(100)\n', (3128, 3133), False, 'from tensorflow.keras import layers, Sequential\n'), ((3162, 3189), 'tensorflow.random.normal', 'tf.random.normal', (['[4, 2048]'], {}), '([4, 2048])\n', (3178, 3189), True, 'import tensorflow as tf\n'), ((251, 270), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1]'], {}), '(x, [-1])\n', (261, 270), True, 'import tensorflow as tf\n'), ((401, 442), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {'dtype': 'tf.float32'}), '(x, dtype=tf.float32)\n', (421, 442), True, 'import tensorflow as tf\n'), ((527, 573), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x_test'], {'dtype': 'tf.float32'}), '(x_test, dtype=tf.float32)\n', (547, 573), True, 'import tensorflow as tf\n'), ((1006, 1038), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(3)'], {'activation': 'None'}), '(3, activation=None)\n', (1018, 1038), False, 'from tensorflow.keras import layers, Sequential\n'), ((1044, 1057), 'tensorflow.keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (1055, 1057), False, 'from tensorflow.keras import layers, Sequential\n'), ((1063, 1095), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2)'], {'activation': 'None'}), '(2, activation=None)\n', (1075, 1095), False, 'from tensorflow.keras import layers, Sequential\n'), ((1101, 1114), 'tensorflow.keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (1112, 1114), False, 'from tensorflow.keras import layers, Sequential\n'), ((1248, 1263), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(3)'], {}), '(3)\n', (1260, 
1263), False, 'from tensorflow.keras import layers, Sequential\n'), ((1281, 1294), 'tensorflow.keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (1292, 1294), False, 'from tensorflow.keras import layers, Sequential\n'), ((1464, 1500), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (1476, 1500), False, 'from tensorflow.keras import layers, Sequential\n'), ((1524, 1560), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (1536, 1560), False, 'from tensorflow.keras import layers, Sequential\n'), ((1584, 1619), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (1596, 1619), False, 'from tensorflow.keras import layers, Sequential\n'), ((1643, 1678), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (1655, 1678), False, 'from tensorflow.keras import layers, Sequential\n'), ((1702, 1718), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {}), '(10)\n', (1714, 1718), False, 'from tensorflow.keras import layers, Sequential\n'), ((1916, 1940), 'tensorflow.keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': '(0.01)'}), '(lr=0.01)\n', (1931, 1940), False, 'from tensorflow.keras import optimizers, losses\n'), ((1963, 2011), 'tensorflow.keras.losses.CategoricalCrossentropy', 'losses.CategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (1993, 2011), False, 'from tensorflow.keras import optimizers, losses\n'), ((2389, 2425), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (2401, 2425), False, 'from tensorflow.keras import layers, Sequential\n'), ((2449, 2485), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2461, 2485), False, 'from tensorflow.keras import layers, Sequential\n'), ((2509, 2544), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (2521, 2544), False, 'from tensorflow.keras import layers, Sequential\n'), ((2568, 2603), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (2580, 2603), False, 'from tensorflow.keras import layers, Sequential\n'), ((2627, 2643), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {}), '(10)\n', (2639, 2643), False, 'from tensorflow.keras import layers, Sequential\n'), ((2672, 2696), 'tensorflow.keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': '(0.01)'}), '(lr=0.01)\n', (2687, 2696), False, 'from tensorflow.keras import optimizers, losses\n'), ((2719, 2770), 'tensorflow.losses.CategoricalCrossentropy', 'tf.losses.CategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (2752, 2770), True, 'import tensorflow as tf\n')] |
# Generated by Django 3.0.7 on 2020-08-12 15:46
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('segint_api', '0034_auto_20200807_1421'),
    ]

    operations = [
        migrations.AlterField(
            model_name='segmentationtelemetry',
            name='client_software_version',
            field=models.CharField(max_length=200),
        ),
        migrations.AlterField(
            model_name='segmentationtelemetry',
            name='segmentation_id',
            field=models.CharField(max_length=200),
        ),
        migrations.AlterField(
            model_name='segmentationtelemetry',
            name='segmentation_model_id',
            field=models.CharField(max_length=200),
        ),
    ]
| [
"django.db.models.CharField"
] | [((371, 403), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (387, 403), False, 'from django.db import migrations, models\n'), ((549, 581), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (565, 581), False, 'from django.db import migrations, models\n'), ((733, 765), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (749, 765), False, 'from django.db import migrations, models\n')] |
import numpy as np
import os
from pyarrow.parquet import ParquetFile
import torch
from PIL import Image, ImageOps
import cv2
from typing import Union, Tuple
from scipy.spatial.transform import Rotation

# pytorch dataloader hangs on cv2.undistort() without the line below
# https://github.com/pytorch/pytorch/issues/1838
cv2.setNumThreads(0)

# from dataset.azure_loader import AzureImageLoader
# from utils.speed_curvature import speed_and_curvature_to_pose, speed_and_curvature_to_position_and_orientation

M_PI = 3.14159265359

INTRINSICS = 1033.0 / 2, 0.0, 960.0 / 2, 0.0, 1033.0 / 2, 600.0 / 2, 0.0, 0.0, 1.0
CAMERA_POSE = {
    'front-forward': (0.0, 0.0, 1.544254, 0.0, 2.0 * M_PI / 180.0, 0.0),
    'front-left-leftward': (-0.050984, 0.53502, 1.53035, 0.0, 0.0, 60.0 * M_PI / 180.0),
    'front-right-rightward': (-0.050984, -0.53502, 1.53035, 0.0, 0.0, -60.0 * M_PI / 180.0),
}
DISTORTION_COEFS = -0.3286423683166504, 0.08561516553163528, -4.951587106916122e-05, -0.00080674490891397, 0.0
FRAME_RATE = 25


def pose_vec_to_pose(vec):
    """Convert a camera pose vector to a pose matrix.

    Camera extrinsic pose vectors have the world origin at the horizontal
    floor directly below the front-forward-Mono camera, as a 6D vector
    [translation_x_m, translation_y_m, translation_z_m,
    rotation_x_rad, rotation_y_rad, rotation_z_rad].
    The X axis is positive going forward,
    the Y axis is positive going left, and
    the Z axis is positive going up.
    Converts to a pose where the camera is in opencv coordinates (right-down-front).

    Args:
        vec: (x, y, z, roll, pitch, yaw)
    Returns:
        4 x 4 pose matrix
    """
    pose = np.eye(4)
    R = Rotation.from_euler('xz', (-90, -90), True).as_matrix()  # change camera view direction to opencv
    pose[:3, :3] = Rotation.from_euler('xyz', vec[3:]).as_matrix() @ R
    pose[:3, 3] = vec[:3]
    return pose.astype(np.float32)
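

# Sanity check on the convention above: for an all-zero pose vector the
# rotation block is just the opencv re-axing, i.e. (up to numerical noise)
#   pose_vec_to_pose(np.zeros(6))[:3, :3] == [[0, 0, 1], [-1, 0, 0], [0, -1, 0]]
# whose columns map camera x (right) -> world -Y, camera y (down) -> world -Z,
# and camera z (front) -> world +X.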


def pil_loader(path: str) -> Image.Image:
    # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
    with open(path, 'rb') as f:
        img = Image.open(f)
        return img.convert('RGB')


class UndistortCrop():
    def __init__(self, crop=(32, 70, 928, 518)):
        self.crop = crop
        self.bounds_mat = np.array([
            [1, 0, -crop[0]],
            [0, 1, -crop[1]],
            [0, 0, 1]
        ], dtype=np.float32)

    def __call__(self, data):
        # undistort
        image = np.asarray(data['rgb'])
        intrinsics = data['intrinsics']
        distortion_coefs = data['distortion_coefs']
        alpha = 1
        new_intrinsics, _ = cv2.getOptimalNewCameraMatrix(
            intrinsics, distortion_coefs, (image.shape[1], image.shape[0]), alpha
        )
        mapx, mapy = cv2.initUndistortRectifyMap(intrinsics, distortion_coefs, None, new_intrinsics, (image.shape[1], image.shape[0]), 5)
        data['rgb'] = Image.fromarray(cv2.remap(image, mapx, mapy, cv2.INTER_LINEAR))
        data['intrinsics'] = new_intrinsics
        del data['distortion_coefs']
        if 'rgb_context' in data:
            data['rgb_context'] = [Image.fromarray(cv2.remap(np.asarray(img), mapx, mapy, cv2.INTER_LINEAR)) for img in data['rgb_context']]

        # crop
        data['rgb'] = data['rgb'].crop(self.crop)
        if 'rgb_context' in data:
            data['rgb_context'] = [img.crop(self.crop) for img in data['rgb_context']]
        data['intrinsics'] = self.bounds_mat @ data['intrinsics']
        return data


class SingleWayveDataset(torch.utils.data.Dataset):
    def __init__(self, dataframe, img_root='', past=0, future=0, stride=1, cameras=['front-forward'], transform=None, azure_loader=None):
        assert cameras == ['front-forward']  # TODO support surround cameras
        self.img_root = img_root
        self.past = past
        self.future = future
        self.stride = stride
        self.length = len(dataframe) - (past + future) * stride
        self.indices = np.array(list(range(-past, 0)) + list(range(1, future + 1))) * stride
        self.cameras = cameras
        self.pre_transform = UndistortCrop()
        self.transform = transform
        self.azure_loader = azure_loader

        # camera data (extrinsics kept under their own name so they are not
        # clobbered by the ego-motion poses computed below)
        self.intrinsics = {camera: np.array(INTRINSICS, dtype=np.float32).reshape(3, 3) for camera in self.cameras}
        self.camera_pose = {camera: pose_vec_to_pose(CAMERA_POSE[camera]) for camera in self.cameras}
        self.distortion_coefs = {camera: np.array(DISTORTION_COEFS, dtype=np.float32) for camera in self.cameras}

        # dataframe data
        dataframe = dataframe.to_pandas()
        self.runid = dataframe.iloc[0].run_id_noseginfix
        self.speed = dataframe.speed_state.to_numpy(dtype=np.float32) / 3.6
        self.curvature = dataframe.curvature_invm_state.to_numpy(dtype=np.float32)
        self.timestamps = {key: dataframe[key + '_image_timestamp_rgb'].to_numpy(dtype=np.int64) for key in self.cameras}

        # integrate speed/curvature to get pose
        timestamps = self.timestamps['front-forward']
        timestamps_float = ((timestamps - timestamps[0]) / 1e6).astype(np.float32)
        pose = speed_and_curvature_to_pose(
            torch.tensor(self.speed), torch.tensor(self.curvature), torch.tensor(timestamps_float)
        )
        self.pose = pose.numpy()

    def __getitem__(self, index):
        data = {}
        camera = self.cameras[0]
        data['filename'] = os.path.join(self.runid, camera, str(self.timestamps[camera][index])).replace('/', '_')
        data['intrinsics'] = self.intrinsics[camera]
        data['distortion_coefs'] = self.distortion_coefs[camera]
        data['rgb'] = self._load_img(self.runid, camera, self.timestamps[camera][index])
        if data['rgb'] is None:
            return self[index - 1]
        data['pose'] = self.pose[index]
        if self.past > 0 or self.future > 0:
            data['rgb_context'] = [self._load_img(self.runid, camera, self.timestamps[camera][index + j]) for j in self.indices]
            if any([img is None for img in data['rgb_context']]):
                return self[index - 1]
            # np.linalg.inv (there is no np.linalg.inverse) gives relative poses
            data['pose_context'] = [np.linalg.inv(self.pose[index + j]) @ self.pose[index] for j in self.indices]
        data = self.pre_transform(data)
        if self.transform is not None:
            data = self.transform(data)
        return data

    def __len__(self):
        return self.length

    def _load_img(self, run_id, camera, timestamp):
        if self.azure_loader is None:
            # load from local storage
            img_name = os.path.join(self.img_root, run_id, 'cameras', camera, str(timestamp) + 'unixus.jpeg')
            if os.path.exists(img_name):
                image = pil_loader(img_name)
            else:
                print('skipping missing image: ', img_name)
                image = None
        else:
            image = self.azure_loader(run_id, camera, timestamp)

        # hack for images resized/cropped on disk
        if image is None:
            pass
        elif image.size == (1920, 1200):
            image = image.resize((960, 600))
        elif image.size == (1920, 896):
            image = image.resize((960, 448))
            image = ImageOps.pad(image, (960, 600), centering=(0, 0))
        elif image.size == (960, 448):
            image = ImageOps.pad(image, (960, 600), centering=(0, 0))
        elif image.size == (960, 600):
            pass
        elif image.size == (480, 300):
            image = image.resize((960, 600))
        else:
            # img_name is undefined on the azure path, so report identifiers instead
            print('unknown image size', image.size, run_id, camera, timestamp)
            image = None
        return image


class WayveDataset(torch.utils.data.ConcatDataset):
    def __init__(self, parquets, img_root='', past=0, future=0, stride=1, cameras=['front-forward'], transform=None, load_from_azure=False):
        columns = [
            'speed_state',
            'curvature_invm_state',
            'run_id_noseginfix',
        ] + [cam + '_image_timestamp_rgb' for cam in cameras]

        # for loading images from azure blob storage
        azure_loader = AzureImageLoader() if load_from_azure else None

        # open a dataframe for each run_id and construct datasets
        datasets = []
        count = 0
        for i, parquet in enumerate(parquets):
            pqfile = ParquetFile(parquet, memory_map=False)
            num_row_groups = pqfile.metadata.num_row_groups
            for j in range(num_row_groups):
                if count % 100 == 0:
                    print('initializing parquet %d/%d run %d/%d' % (i + 1, len(parquets), j + 1, num_row_groups))
                dataframe = pqfile.read_row_group(j, columns=columns)
                if len(dataframe) > (past + 1 + future) * stride:
                    datasets.append(
                        SingleWayveDataset(dataframe, img_root, past, future, stride, cameras, transform, azure_loader)
                    )
                count += 1
        super().__init__(datasets)

    def __getitem__(self, idx):
        data = super().__getitem__(idx)
        data['idx'] = idx
        return data


###############################################################################################
# from wayve/core/ai/prepare_data/py_prepare_data.py
def prepend_value_to_tensor(tensor: torch.Tensor, value: float) -> torch.Tensor:
    return torch.cat([torch.tensor([value], device=tensor.device).expand(tensor.shape[:-1] + (1,)), tensor], dim=-1)


def _safe_compute_position_from_angles(angles_rad: torch.Tensor,) -> Tuple[torch.Tensor, torch.Tensor]:  # 1 / meters
    safe_threshold = 1e-3
    mask = torch.abs(angles_rad) > safe_threshold
    relative_x_position_m = torch.zeros(angles_rad.shape, device=angles_rad.device)
    relative_y_position_m = torch.zeros(angles_rad.shape, device=angles_rad.device)
    # Piecewise function to avoid instabilities caused by division.
    relative_x_position_m[mask] = torch.sin(angles_rad[mask]) / angles_rad[mask]
    relative_x_position_m[~mask] = 1 - (torch.pow(angles_rad[~mask], 2) / 6)
    relative_y_position_m[mask] = (1 - torch.cos(angles_rad[mask])) / angles_rad[mask]
    relative_y_position_m[~mask] = angles_rad[~mask] / 2
    return relative_x_position_m, relative_y_position_m
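

# The small-angle branches above are the leading Taylor terms of the exact arc
# geometry: sin(a)/a -> 1 - a^2/6 and (1 - cos(a))/a -> a/2 as a -> 0, which
# keeps the division numerically stable near zero turn angle.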


def _rotate_vectors_given_angles(
    relative_x_position_m: torch.Tensor, relative_y_position_m: torch.Tensor, angles_rad: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
    rotated_relative_x_position_m = relative_x_position_m * torch.cos(angles_rad) - relative_y_position_m * torch.sin(
        angles_rad
    )
    rotated_relative_y_position_m = relative_y_position_m * torch.cos(angles_rad) + relative_x_position_m * torch.sin(
        angles_rad
    )
    return rotated_relative_x_position_m, rotated_relative_y_position_m


def speed_and_curvature_to_position_and_orientation(
    speed_ms: torch.Tensor, curvature_inv_m: torch.Tensor, timestamps_sec: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    # Assumes that scalar values are in the last dimension of the given tensors.
    # x-axis points forward, y-axis points left
    # angle is calculated from x-axis where positive direction is counter clockwise.
    # TODO: how do we interpolate speed and curvature here?
    speed_values_m = (speed_ms[..., 1:] + speed_ms[..., :-1]) / 2
    curvature_values_invm = (curvature_inv_m[..., 1:] + curvature_inv_m[..., :-1]) / 2
    delta_time_sec = timestamps_sec[..., 1:] - timestamps_sec[..., :-1]
    distances_travelled_m = speed_values_m * delta_time_sec
    angles_rad = distances_travelled_m * curvature_values_invm
    cumulative_angles_rad = torch.cumsum(angles_rad, dim=-1)
    cumulative_angles_rad_with_zero = prepend_value_to_tensor(tensor=cumulative_angles_rad, value=0.0)
    relative_x_position_m, relative_y_position_m = _safe_compute_position_from_angles(angles_rad)
    rotated_relative_x_position_m, rotated_relative_y_position_m = _rotate_vectors_given_angles(
        relative_x_position_m, relative_y_position_m, cumulative_angles_rad_with_zero[..., :-1]
    )
    rotated_scaled_relative_x_position_m = rotated_relative_x_position_m * distances_travelled_m
    rotated_scaled_relative_y_position_m = rotated_relative_y_position_m * distances_travelled_m
    absolute_x_position_m = torch.cumsum(rotated_scaled_relative_x_position_m, dim=-1)
    absolute_y_position_m = torch.cumsum(rotated_scaled_relative_y_position_m, dim=-1)
    absolute_x_position_m_with_zero = prepend_value_to_tensor(tensor=absolute_x_position_m, value=0.0)
    absolute_y_position_m_with_zero = prepend_value_to_tensor(tensor=absolute_y_position_m, value=0.0)
    return absolute_x_position_m_with_zero, absolute_y_position_m_with_zero, cumulative_angles_rad_with_zero


def position_and_orientation_to_pose(x: torch.Tensor, y: torch.Tensor, r: torch.Tensor) -> torch.Tensor:
    zero = torch.zeros_like(x)
    one = torch.ones_like(x)
    pose = torch.stack(
        [
            torch.stack([r.cos(), -r.sin(), zero, x], 1),
            torch.stack([r.sin(), r.cos(), zero, y], 1),
            torch.stack([zero, zero, one, zero], 1),
            torch.stack([zero, zero, zero, one], 1),
        ],
        1,
    )
    return pose


def speed_and_curvature_to_pose(speed_ms: torch.Tensor, curvature_inv_m: torch.Tensor, timestamps_sec: torch.Tensor) -> torch.Tensor:
    x, y, r = speed_and_curvature_to_position_and_orientation(
        speed_ms, curvature_inv_m, timestamps_sec
    )
    pose = position_and_orientation_to_pose(x, y, r)
    return pose
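

# Quick sanity check of the integrator (a sketch, not part of the pipeline):
# driving straight at constant speed should give pure forward translation.
#
#   t = torch.arange(3, dtype=torch.float32)        # timestamps 0s, 1s, 2s
#   v = torch.full((3,), 10.0)                      # 10 m/s throughout
#   k = torch.zeros(3)                              # zero curvature
#   speed_and_curvature_to_pose(v, k, t)[:, 0, 3]   # x positions: 0., 10., 20.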


def main():
    mount = '/home/zak/WayveData/'
    img_root = 'remote/image_storage/images-960x600/'
    parquets = ['remote/pure_dataset/dev_datasets/ipace/debug/dataset=train/data-00000-of-00001']
    past = 1
    future = 1
    stride = 1

    img_root = os.path.join(mount, img_root)
    parquets = [os.path.join(mount, parquet) for parquet in parquets]
    dataset = WayveDataset(parquets, img_root, past, future, stride)
    print(dataset[0])


if __name__ == '__main__':
    main()
| [
"cv2.initUndistortRectifyMap",
"torch.sin",
"cv2.remap",
"torch.pow",
"numpy.array",
"torch.cos",
"os.path.exists",
"scipy.spatial.transform.Rotation.from_euler",
"numpy.linalg.inverse",
"numpy.asarray",
"torch.zeros_like",
"numpy.eye",
"torch.ones_like",
"torch.abs",
"cv2.getOptimalNewCameraMatrix",
"torch.cumsum",
"PIL.ImageOps.pad",
"cv2.setNumThreads",
"PIL.Image.open",
"torch.stack",
"os.path.join",
"torch.tensor",
"pyarrow.parquet.ParquetFile",
"torch.zeros"
] | [((321, 341), 'cv2.setNumThreads', 'cv2.setNumThreads', (['(0)'], {}), '(0)\n', (338, 341), False, 'import cv2\n'), ((1654, 1663), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1660, 1663), True, 'import numpy as np\n'), ((9730, 9785), 'torch.zeros', 'torch.zeros', (['angles_rad.shape'], {'device': 'angles_rad.device'}), '(angles_rad.shape, device=angles_rad.device)\n', (9741, 9785), False, 'import torch\n'), ((9814, 9869), 'torch.zeros', 'torch.zeros', (['angles_rad.shape'], {'device': 'angles_rad.device'}), '(angles_rad.shape, device=angles_rad.device)\n', (9825, 9869), False, 'import torch\n'), ((11690, 11722), 'torch.cumsum', 'torch.cumsum', (['angles_rad'], {'dim': '(-1)'}), '(angles_rad, dim=-1)\n', (11702, 11722), False, 'import torch\n'), ((12346, 12404), 'torch.cumsum', 'torch.cumsum', (['rotated_scaled_relative_x_position_m'], {'dim': '(-1)'}), '(rotated_scaled_relative_x_position_m, dim=-1)\n', (12358, 12404), False, 'import torch\n'), ((12433, 12491), 'torch.cumsum', 'torch.cumsum', (['rotated_scaled_relative_y_position_m'], {'dim': '(-1)'}), '(rotated_scaled_relative_y_position_m, dim=-1)\n', (12445, 12491), False, 'import torch\n'), ((12928, 12947), 'torch.zeros_like', 'torch.zeros_like', (['x'], {}), '(x)\n', (12944, 12947), False, 'import torch\n'), ((12958, 12976), 'torch.ones_like', 'torch.ones_like', (['x'], {}), '(x)\n', (12973, 12976), False, 'import torch\n'), ((13863, 13892), 'os.path.join', 'os.path.join', (['mount', 'img_root'], {}), '(mount, img_root)\n', (13875, 13892), False, 'import os\n'), ((2095, 2108), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (2105, 2108), False, 'from PIL import Image, ImageOps\n'), ((2265, 2340), 'numpy.array', 'np.array', (['[[1, 0, -crop[0]], [0, 1, -crop[1]], [0, 0, 1]]'], {'dtype': 'np.float32'}), '([[1, 0, -crop[0]], [0, 1, -crop[1]], [0, 0, 1]], dtype=np.float32)\n', (2273, 2340), True, 'import numpy as np\n'), ((2455, 2478), 'numpy.asarray', 'np.asarray', (["data['rgb']"], {}), "(data['rgb'])\n", (2465, 2478), True, 'import numpy as np\n'), ((2617, 2721), 'cv2.getOptimalNewCameraMatrix', 'cv2.getOptimalNewCameraMatrix', (['intrinsics', 'distortion_coefs', '(image.shape[1], image.shape[0])', 'alpha'], {}), '(intrinsics, distortion_coefs, (image.shape[1],\n image.shape[0]), alpha)\n', (2646, 2721), False, 'import cv2\n'), ((2761, 2881), 'cv2.initUndistortRectifyMap', 'cv2.initUndistortRectifyMap', (['intrinsics', 'distortion_coefs', 'None', 'new_intrinsics', '(image.shape[1], image.shape[0])', '(5)'], {}), '(intrinsics, distortion_coefs, None,\n new_intrinsics, (image.shape[1], image.shape[0]), 5)\n', (2788, 2881), False, 'import cv2\n'), ((9662, 9683), 'torch.abs', 'torch.abs', (['angles_rad'], {}), '(angles_rad)\n', (9671, 9683), False, 'import torch\n'), ((9973, 10000), 'torch.sin', 'torch.sin', (['angles_rad[mask]'], {}), '(angles_rad[mask])\n', (9982, 10000), False, 'import torch\n'), ((13909, 13937), 'os.path.join', 'os.path.join', (['mount', 'parquet'], {}), '(mount, parquet)\n', (13921, 13937), False, 'import os\n'), ((1672, 1715), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['"""xz"""', '(-90, -90)', '(True)'], {}), "('xz', (-90, -90), True)\n", (1691, 1715), False, 'from scipy.spatial.transform import Rotation\n'), ((2917, 2963), 'cv2.remap', 'cv2.remap', (['image', 'mapx', 'mapy', 'cv2.INTER_LINEAR'], {}), '(image, mapx, mapy, cv2.INTER_LINEAR)\n', (2926, 2963), False, 'import cv2\n'), ((4484, 4528), 'numpy.array', 'np.array', (['DISTORTION_COEFS'], {'dtype': 'np.float32'}), 
'(DISTORTION_COEFS, dtype=np.float32)\n', (4492, 4528), True, 'import numpy as np\n'), ((5203, 5227), 'torch.tensor', 'torch.tensor', (['self.speed'], {}), '(self.speed)\n', (5215, 5227), False, 'import torch\n'), ((5229, 5257), 'torch.tensor', 'torch.tensor', (['self.curvature'], {}), '(self.curvature)\n', (5241, 5257), False, 'import torch\n'), ((5259, 5289), 'torch.tensor', 'torch.tensor', (['timestamps_float'], {}), '(timestamps_float)\n', (5271, 5289), False, 'import torch\n'), ((6718, 6742), 'os.path.exists', 'os.path.exists', (['img_name'], {}), '(img_name)\n', (6732, 6742), False, 'import os\n'), ((8357, 8395), 'pyarrow.parquet.ParquetFile', 'ParquetFile', (['parquet'], {'memory_map': '(False)'}), '(parquet, memory_map=False)\n', (8368, 8395), False, 'from pyarrow.parquet import ParquetFile\n'), ((10060, 10091), 'torch.pow', 'torch.pow', (['angles_rad[~mask]', '(2)'], {}), '(angles_rad[~mask], 2)\n', (10069, 10091), False, 'import torch\n'), ((10137, 10164), 'torch.cos', 'torch.cos', (['angles_rad[mask]'], {}), '(angles_rad[mask])\n', (10146, 10164), False, 'import torch\n'), ((10538, 10559), 'torch.cos', 'torch.cos', (['angles_rad'], {}), '(angles_rad)\n', (10547, 10559), False, 'import torch\n'), ((10586, 10607), 'torch.sin', 'torch.sin', (['angles_rad'], {}), '(angles_rad)\n', (10595, 10607), False, 'import torch\n'), ((10683, 10704), 'torch.cos', 'torch.cos', (['angles_rad'], {}), '(angles_rad)\n', (10692, 10704), False, 'import torch\n'), ((10731, 10752), 'torch.sin', 'torch.sin', (['angles_rad'], {}), '(angles_rad)\n', (10740, 10752), False, 'import torch\n'), ((13138, 13177), 'torch.stack', 'torch.stack', (['[zero, zero, one, zero]', '(1)'], {}), '([zero, zero, one, zero], 1)\n', (13149, 13177), False, 'import torch\n'), ((13191, 13230), 'torch.stack', 'torch.stack', (['[zero, zero, zero, one]', '(1)'], {}), '([zero, zero, zero, one], 1)\n', (13202, 13230), False, 'import torch\n'), ((1788, 1823), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['"""xyz"""', 'vec[3:]'], {}), "('xyz', vec[3:])\n", (1807, 1823), False, 'from scipy.spatial.transform import Rotation\n'), ((4267, 4305), 'numpy.array', 'np.array', (['INTRINSICS'], {'dtype': 'np.float32'}), '(INTRINSICS, dtype=np.float32)\n', (4275, 4305), True, 'import numpy as np\n'), ((6160, 6199), 'numpy.linalg.inverse', 'np.linalg.inverse', (['self.pose[index + j]'], {}), '(self.pose[index + j])\n', (6177, 6199), True, 'import numpy as np\n'), ((7260, 7309), 'PIL.ImageOps.pad', 'ImageOps.pad', (['image', '(960, 600)'], {'centering': '(0, 0)'}), '(image, (960, 600), centering=(0, 0))\n', (7272, 7309), False, 'from PIL import Image, ImageOps\n'), ((9412, 9455), 'torch.tensor', 'torch.tensor', (['[value]'], {'device': 'tensor.device'}), '([value], device=tensor.device)\n', (9424, 9455), False, 'import torch\n'), ((3142, 3157), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (3152, 3157), True, 'import numpy as np\n'), ((7368, 7417), 'PIL.ImageOps.pad', 'ImageOps.pad', (['image', '(960, 600)'], {'centering': '(0, 0)'}), '(image, (960, 600), centering=(0, 0))\n', (7380, 7417), False, 'from PIL import Image, ImageOps\n')] |
# -*- coding: utf-8 -*-
"""Transaction class."""

import enum
import hashlib
import json
from typing import (
    Union,
    Optional,
    Dict
)

from . import tx_hash as txh
from ..obj import aer
from .address import Address
from ..grpc import blockchain_pb2
from ..utils.encoding import encode_signature, decode_signature, encode_payload


@enum.unique
class TxType(enum.Enum):
    NORMAL = blockchain_pb2.NORMAL
    GOVERNANCE = blockchain_pb2.GOVERNANCE
    SC_REDEPLOY = blockchain_pb2.REDEPLOY
    SC_FEE_DELEGATION = blockchain_pb2.FEEDELEGATION
    TRANSFER = blockchain_pb2.TRANSFER
    SC_CALL = blockchain_pb2.CALL
    SC_DEPLOY = blockchain_pb2.DEPLOY


class Transaction:
    """
    Transaction data structure.
    """

    def __init__(
        self,
        from_address: Union[bytes, Address, None] = None,
        to_address: Union[bytes, Address, None] = None,
        nonce: int = 0,
        amount: Union[bytes, str, int, float] = 0,
        payload: Optional[bytes] = None,
        gas_price: int = 0,
        gas_limit: int = 0,
        read_only: bool = False,
        tx_hash: Optional[bytes] = None,
        tx_sign: Union[bytes, str, None] = None,
        tx_type=TxType.TRANSFER,
        chain_id: Optional[bytes] = None,
        # typing TODO: circular import, consider removing block details from tx
        block=None,
        index_in_block: int = -1,
        is_in_mempool: bool = False
    ) -> None:
        self.__from_address = from_address
        self.__to_address = to_address
        self.__nonce = nonce
        self.__amount = aer.Aer(amount)
        self.__payload = payload
        if isinstance(gas_price, bytes):
            gas_price = int.from_bytes(gas_price, byteorder='big')
        self.__gas_price = aer.Aer(gas_price)
        if isinstance(gas_limit, bytes):
            gas_limit = int.from_bytes(gas_limit, byteorder='little')
        self.__gas_limit = gas_limit
        if isinstance(tx_type, bytes):
            tx_type = int.from_bytes(tx_type, byteorder='little')
        self.__tx_type = TxType(tx_type)
        self.__read_only = read_only
        self.__tx_hash = txh.TxHash(tx_hash)
        if isinstance(tx_sign, str):
            tx_sign = decode_signature(tx_sign)
        self.__sign = tx_sign
        self.__chain_id = chain_id
        self.__is_in_mempool = is_in_mempool
        if is_in_mempool:
            self.__block = None
            self.__index_in_block = -1
        else:
            self.__block = block
            self.__index_in_block = index_in_block

    def calculate_hash(self, including_sign: bool = True) -> bytes:
        m = hashlib.sha256()
        # nonce
        b = self.__nonce.to_bytes(8, byteorder='little')
        m.update(b)
        # from (account)
        if self.__from_address:
            m.update(bytes(self.__from_address))
        # to (recipient)
        if self.__to_address:
            m.update(bytes(self.__to_address))
        # amount
        b = bytes(self.__amount)
        m.update(b)
        # payload
        if self.__payload:
            m.update(self.__payload)
        # gas limit
        b = self.__gas_limit.to_bytes(8, byteorder='little')
        m.update(b)
        # gas price
        b = bytes(self.__gas_price)
        m.update(b)
        # type
        b = self.__tx_type.value.to_bytes(4, byteorder='little')
        m.update(b)
        # chainIdHash
        if self.__chain_id:
            m.update(self.__chain_id)
        # sign
        if including_sign and self.__sign:
            m.update(self.__sign)
        return m.digest()

    @property
    def block(self):
        return self.__block

    @property
    def index_in_block(self) -> int:
        return self.__index_in_block

    @property
    def is_in_mempool(self) -> bool:
        return self.__is_in_mempool

    @property
    def nonce(self) -> int:
        return self.__nonce

    @nonce.setter
    def nonce(self, v: int) -> None:
        if self.__read_only:
            return
        self.__nonce = v

    @property
    def from_address(self) -> Union[bytes, Address, None]:
        return self.__from_address

    @from_address.setter
    def from_address(self, v: Union[bytes, Address, None]) -> None:
        if self.__read_only:
            return
        self.__from_address = v

    @property
    def to_address(self) -> Union[bytes, Address, None]:
        return self.__to_address

    @to_address.setter
    def to_address(self, v: Union[bytes, Address, None]) -> None:
        if self.__read_only:
            return
        self.__to_address = v

    @property
    def amount(self) -> aer.Aer:
        return self.__amount

    @amount.setter
    def amount(self, v: Union[bytes, str, int, float]) -> None:
        if self.__read_only:
            return
        self.__amount = aer.Aer(v)

    @property
    def payload(self) -> Optional[bytes]:
        return self.__payload

    @payload.setter
    def payload(self, v: Optional[bytes]) -> None:
        if self.__read_only:
            return
        self.__payload = v

    @property
    def payload_str(self) -> Optional[str]:
        if self.__payload is None:
            return None
        return encode_payload(self.__payload)

    @property
    def gas_limit(self) -> int:
        return self.__gas_limit

    @gas_limit.setter
    def gas_limit(self, v: int) -> None:
        if self.__read_only:
            return
        self.__gas_limit = v

    @property
    def gas_price(self) -> aer.Aer:
        return self.__gas_price

    @gas_price.setter
    def gas_price(self, v: aer.Aer):
        if self.__read_only:
            return
        self.__gas_price = v

    @property
    def tx_type(self):
        return self.__tx_type

    @tx_type.setter
    def tx_type(self, v):
        if self.__read_only:
            return
        self.__tx_type = TxType(v)

    @property
    def chain_id(self) -> Optional[bytes]:
        return self.__chain_id

    @chain_id.setter
    def chain_id(self, v: Optional[bytes]) -> None:
        if self.__read_only:
            return
        self.__chain_id = v

    @property
    def sign(self) -> Optional[bytes]:
        return self.__sign

    @sign.setter
    def sign(self, v: Optional[bytes]) -> None:
        if self.__read_only:
            return
        self.__sign = v

    @property
    def sign_str(self) -> Optional[str]:
        if self.__sign is None:
            return None
        return encode_signature(self.__sign)

    @property
    def tx_hash(self) -> txh.TxHash:
        if self.__read_only:
            return self.__tx_hash
        return txh.TxHash(self.calculate_hash())

    def json(self, without_block: bool = False) -> Dict:
        account = None
        recipient = None
        if self.from_address is not None:
            account = str(self.from_address)
        if self.to_address is not None:
            recipient = str(self.to_address)
        tx_json = {
            "Hash": str(self.tx_hash),
            "Body": {
                "Nonce": self.nonce,
                "Account": account,
                "Recipient": recipient,
                "Amount": str(self.amount),
                "Payload": self.payload_str,
                "GasPrice": str(self.gas_price),
                "GasLimit": self.gas_limit,
                "Sign": self.sign_str,
                "Type": self.tx_type.name,
            },
            "IsInMempool": self.__is_in_mempool,
            "IndexInBlock": self.__index_in_block,
        }
        if not without_block:
            if self.__block is None:
                tx_json["Block"] = None
            else:
                tx_json["Block"] = self.__block.json(header_only=True)
        return tx_json

    def __str__(self) -> str:
        return json.dumps(self.json(), indent=2)

    def __bytes__(self) -> bytes:
        return bytes(self.tx_hash)
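

# Illustrative usage (a sketch; the surrounding package supplies Aer and
# TxHash):
#
#   tx = Transaction(nonce=1, amount=1, tx_type=TxType.TRANSFER)
#   tx.json(without_block=True)   # dict view of the body fields
#   bytes(tx)                     # SHA-256 digest recomputed from those fields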
| [
"hashlib.sha256"
] | [((2629, 2645), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (2643, 2645), False, 'import hashlib\n')] |
from sqlalchemy import Integer, String, Column

from . import db


class TagModel(db.Model):
    __tablename__ = 'tag'

    id = Column(Integer, primary_key=True)
    tag_name = Column(String(30), unique=True, nullable=False)

    def __repr__(self):
        return '<Tag %r>' % self.tag_name
| [
"sqlalchemy.String",
"sqlalchemy.Column"
] | [((126, 159), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (132, 159), False, 'from sqlalchemy import Integer, String, Column\n'), ((182, 192), 'sqlalchemy.String', 'String', (['(30)'], {}), '(30)\n', (188, 192), False, 'from sqlalchemy import Integer, String, Column\n')] |
import logging
import os

import click

from analyzer.analyze import Analyzer


@click.group()
def cli():
    pass


@click.command()
@click.option('--debug', default=False, help='enable debug log verbosity', is_flag=True)
@click.option('--crawler-json', default='../output/crawler.json', help='filepath to crawler.json')
@click.option('--skip-write', default=False, help='skip writing the results to file', is_flag=True)
def analyze(debug, crawler_json, skip_write):
    """ This command analyzes the output of the crawler component. """
    # Set up logging
    logging.basicConfig(
        level=logging.DEBUG if debug else logging.INFO,
        format='%(asctime)s %(levelname)s\t%(name)s\t%(message)s',
        # filename='app.log',
        # filemode='w',
    )

    # Start analyzer
    main_dir = os.path.dirname(os.path.realpath(__file__))
    analyzer = Analyzer(crawler_metadata_filepath=os.path.join(main_dir, crawler_json))
    analyzer.run()
    if not skip_write:
        analyzer.write_results_to_file()


cli.add_command(analyze)
| [
"logging.basicConfig",
"click.group",
"click.option",
"os.path.join",
"os.path.realpath",
"click.command"
] | [((81, 94), 'click.group', 'click.group', ([], {}), '()\n', (92, 94), False, 'import click\n'), ((118, 133), 'click.command', 'click.command', ([], {}), '()\n', (131, 133), False, 'import click\n'), ((135, 226), 'click.option', 'click.option', (['"""--debug"""'], {'default': '(False)', 'help': '"""enable debug log verbosity"""', 'is_flag': '(True)'}), "('--debug', default=False, help='enable debug log verbosity',\n is_flag=True)\n", (147, 226), False, 'import click\n'), ((224, 326), 'click.option', 'click.option', (['"""--crawler-json"""'], {'default': '"""../output/crawler.json"""', 'help': '"""filepath to crawler.json"""'}), "('--crawler-json', default='../output/crawler.json', help=\n 'filepath to crawler.json')\n", (236, 326), False, 'import click\n'), ((323, 426), 'click.option', 'click.option', (['"""--skip-write"""'], {'default': '(False)', 'help': '"""skip writing the results to file"""', 'is_flag': '(True)'}), "('--skip-write', default=False, help=\n 'skip writing the results to file', is_flag=True)\n", (335, 426), False, 'import click\n'), ((564, 695), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': '(logging.DEBUG if debug else logging.INFO)', 'format': '"""%(asctime)s %(levelname)s\t%(name)s\t%(message)s"""'}), "(level=logging.DEBUG if debug else logging.INFO, format=\n '%(asctime)s %(levelname)s\\t%(name)s\\t%(message)s')\n", (583, 695), False, 'import logging\n'), ((821, 847), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (837, 847), False, 'import os\n'), ((899, 935), 'os.path.join', 'os.path.join', (['main_dir', 'crawler_json'], {}), '(main_dir, crawler_json)\n', (911, 935), False, 'import os\n')] |
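# Sketch: exercising the analyze command above through click's built-in
# test runner instead of a shell. The option values are placeholders; this
# reuses the `cli` group defined above.
from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(cli, ["analyze", "--debug", "--skip-write"])
print(result.exit_code, result.output)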
import ampule
import socketpool
import wifi
import json
import os
try:
from secrets import secrets
except ImportError:
print("WiFi secrets not found in secrets.py")
raise
try:
print("Connecting to {}...".format(secrets["ssid"]))
wifi.radio.connect(secrets["ssid"], secrets["password"])
except:
print("Error connecting to WiFi")
raise
# Collect details about the system
details = {
"machine": os.uname().machine,
"release": os.uname().release,
"platform": os.uname().sysname,
}
def web_page():
"""Content for the web page."""
content = f"""
<!DOCTYPE html>
<html lang='en'>
<head>
<meta charset='utf-8'>
<meta name='viewport' content='width=device-width, initial-scale=1.0'>
<title>CircuitPython</title>
<!-- CSS could also be loaded from local storage or be embedded. --->
<link rel='stylesheet' href='https://cdn.jsdelivr.net/npm/water.css@2/out/water.css'>
</head>
<body>
<h1>CircuitPython</h1>
    <p>This simple web page shows some details about your board.</p>
<!-- All details are shown in a table --->
<div id="details"></div>
<p style='text-align:center;'>Content served by <a href='https://github.com/deckerego/ampule'>ampule</a> and styled with <a href='https://watercss.kognise.dev/'>Water.css</a>.</p>
<!-- If you include Javascript then keep an eye on the escaping, here it's a Python f-string. --->
<script>
window.addEventListener('load', () => {{
// The dict can not be used directly
data = JSON.parse('{json.dumps(details)}');
var table = document.createElement('table'), row, cell1, cell2;
document.getElementById('details').appendChild(table);
for (let key in data) {{
row = table.insertRow();
cell1 = row.insertCell();
cell2 = row.insertCell();
cell1.innerHTML = key;
cell2.innerHTML = data[key];
}}
}});
</script>
</body>
</html>
"""
return content
def static_file():
"""Load the web page from the CIRCUITPY drive."""
with open('demo.html') as local_file:
content = local_file.read()
return content
@ampule.route("/")
def index(request):
"""Route for the default."""
return (200, {}, web_page())
@ampule.route("/demo")
def demo(request):
"""Route for the local file."""
return (200, {}, static_file())
pool = socketpool.SocketPool(wifi.radio)
socket = pool.socket()
socket.bind(("0.0.0.0", 80))
socket.listen(1)
print("Connected to {}, Web server running on http://{}:80".format(secrets["ssid"], wifi.radio.ipv4_address))
while True:
ampule.listen(socket)
| [
"ampule.listen",
"wifi.radio.connect",
"json.dumps",
"ampule.route",
"socketpool.SocketPool",
"os.uname"
] | [((2297, 2314), 'ampule.route', 'ampule.route', (['"""/"""'], {}), "('/')\n", (2309, 2314), False, 'import ampule\n'), ((2404, 2425), 'ampule.route', 'ampule.route', (['"""/demo"""'], {}), "('/demo')\n", (2416, 2425), False, 'import ampule\n'), ((2526, 2559), 'socketpool.SocketPool', 'socketpool.SocketPool', (['wifi.radio'], {}), '(wifi.radio)\n', (2547, 2559), False, 'import socketpool\n'), ((251, 307), 'wifi.radio.connect', 'wifi.radio.connect', (["secrets['ssid']", "secrets['password']"], {}), "(secrets['ssid'], secrets['password'])\n", (269, 307), False, 'import wifi\n'), ((2756, 2777), 'ampule.listen', 'ampule.listen', (['socket'], {}), '(socket)\n', (2769, 2777), False, 'import ampule\n'), ((428, 438), 'os.uname', 'os.uname', ([], {}), '()\n', (436, 438), False, 'import os\n'), ((463, 473), 'os.uname', 'os.uname', ([], {}), '()\n', (471, 473), False, 'import os\n'), ((499, 509), 'os.uname', 'os.uname', ([], {}), '()\n', (507, 509), False, 'import os\n'), ((1636, 1655), 'json.dumps', 'json.dumps', (['details'], {}), '(details)\n', (1646, 1655), False, 'import json\n')] |
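# A further route sketch in the same (status, headers, body) convention as
# the handlers above; it would need to be defined before the listen loop.
# The JSON content-type header is an assumption about what a client wants.
@ampule.route("/data")
def details_data(request):
    return (200, {"Content-Type": "application/json"}, json.dumps(details))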
import os.path
import uuid
import urllib.parse
import subprocess
import sys
import datetime
import pickle
import base64
import tabulate
import click
import simplejson as json
from gemstone_admin.structs import Service, Configuration
GEMSTONE_DIR = os.path.join(os.path.expanduser("~"), ".gemstone")
CONFIG_FILE = os.path.join(GEMSTONE_DIR, ".admin")
if not os.path.isdir(GEMSTONE_DIR):
os.mkdir(GEMSTONE_DIR)
if not os.path.isfile(CONFIG_FILE):
with open(CONFIG_FILE, "w") as f:
f.write(json.dumps({"env": {}, "installed": {}, "running": {}}))
def read_config_file():
return Configuration.from_file(CONFIG_FILE)
def modify_env_value(key, value):
current_config = read_config_file()
current_config.add_env_value(key, value)
current_config.save_to_file(CONFIG_FILE)
def get_value_from_config(key):
current_config = read_config_file()
return current_config.get_env_value(key)
def get_keys_from_config():
current_config = read_config_file()
return current_config.list_env_keys()
def register_service(service):
current_config = read_config_file()
current_config.add_service(service)
current_config.save_to_file(CONFIG_FILE)
@click.group()
def cli():
pass
@click.group(help='Global configuration management')
def config():
pass
@click.group(help='Service configuration')
def service():
pass
@click.group(help='Running microservice instances')
def instance():
pass
@click.command("reset")
def reset():
os.remove(CONFIG_FILE)
cli.add_command(config)
cli.add_command(service)
cli.add_command(instance)
cli.add_command(reset)
# region service
@click.command("install", help="Installs a service from the given source")
@click.argument("install_source")
@click.option("--module_name", default=None)
def service_install(install_source, module_name):
click.echo("Installing from {}".format(install_source))
if not module_name:
click.echo("No name specified. Assuming {}".format(install_source))
module_name = install_source
click.echo("Module name: {}".format(module_name))
click.echo("Service module: {}.service".format(module_name))
service = Service(module_name, install_source)
if service.install():
register_service(service)
click.echo(click.style("Finished", fg="green"))
else:
click.echo(click.style(service.info, fg="red"))
@click.command("uninstall", help="Uninstalls a service")
@click.argument("name")
def service_uninstall(name):
pass
@click.command("list", help="Lists all installed services")
def service_list():
current_config = read_config_file()
service_data = []
for service in current_config.iter_services():
service_data.append(
[service.id, service.name, service.service_module, service.config_module, service.install_source])
click.echo(
tabulate.tabulate(service_data, headers=("Id", "Name", "Service module", "Config module", "Install source")))
service.add_command(service_install)
service.add_command(service_uninstall)
service.add_command(service_list)
# endregion
# region config
@click.command("write")
@click.argument("key")
@click.argument("value")
def write_config(key, value):
modify_env_value(key, value)
@click.command("read")
@click.argument("key")
def read_config(key):
value = get_value_from_config(key)
if not value:
click.echo(click.style("Key does not exist", fg="red"))
else:
click.echo(value)
@click.command("list")
def list_config():
current_config = read_config_file()
items = []
for k in current_config.list_env_keys():
items.append((k, current_config.get_env_value(k)))
items.sort(key=lambda x: x[0])
print(tabulate.tabulate(items, headers=["Key", "Value"]))
config.add_command(write_config)
config.add_command(read_config)
config.add_command(list_config)
# endregion
| [
"gemstone_admin.structs.Configuration.from_file",
"tabulate.tabulate",
"click.argument",
"click.group",
"click.option",
"simplejson.dumps",
"click.style",
"click.echo",
"click.command",
"gemstone_admin.structs.Service"
] | [((1197, 1210), 'click.group', 'click.group', ([], {}), '()\n', (1208, 1210), False, 'import click\n'), ((1234, 1285), 'click.group', 'click.group', ([], {'help': '"""Global configuration management"""'}), "(help='Global configuration management')\n", (1245, 1285), False, 'import click\n'), ((1312, 1353), 'click.group', 'click.group', ([], {'help': '"""Service configuration"""'}), "(help='Service configuration')\n", (1323, 1353), False, 'import click\n'), ((1381, 1431), 'click.group', 'click.group', ([], {'help': '"""Running microservice instances"""'}), "(help='Running microservice instances')\n", (1392, 1431), False, 'import click\n'), ((1460, 1482), 'click.command', 'click.command', (['"""reset"""'], {}), "('reset')\n", (1473, 1482), False, 'import click\n'), ((1644, 1717), 'click.command', 'click.command', (['"""install"""'], {'help': '"""Installs a service from the given source"""'}), "('install', help='Installs a service from the given source')\n", (1657, 1717), False, 'import click\n'), ((1719, 1751), 'click.argument', 'click.argument', (['"""install_source"""'], {}), "('install_source')\n", (1733, 1751), False, 'import click\n'), ((1753, 1796), 'click.option', 'click.option', (['"""--module_name"""'], {'default': 'None'}), "('--module_name', default=None)\n", (1765, 1796), False, 'import click\n'), ((2402, 2457), 'click.command', 'click.command', (['"""uninstall"""'], {'help': '"""Uninstalls a service"""'}), "('uninstall', help='Uninstalls a service')\n", (2415, 2457), False, 'import click\n'), ((2459, 2481), 'click.argument', 'click.argument', (['"""name"""'], {}), "('name')\n", (2473, 2481), False, 'import click\n'), ((2523, 2581), 'click.command', 'click.command', (['"""list"""'], {'help': '"""Lists all installed services"""'}), "('list', help='Lists all installed services')\n", (2536, 2581), False, 'import click\n'), ((3135, 3157), 'click.command', 'click.command', (['"""write"""'], {}), "('write')\n", (3148, 3157), False, 'import click\n'), ((3159, 3180), 'click.argument', 'click.argument', (['"""key"""'], {}), "('key')\n", (3173, 3180), False, 'import click\n'), ((3182, 3205), 'click.argument', 'click.argument', (['"""value"""'], {}), "('value')\n", (3196, 3205), False, 'import click\n'), ((3272, 3293), 'click.command', 'click.command', (['"""read"""'], {}), "('read')\n", (3285, 3293), False, 'import click\n'), ((3295, 3316), 'click.argument', 'click.argument', (['"""key"""'], {}), "('key')\n", (3309, 3316), False, 'import click\n'), ((3499, 3520), 'click.command', 'click.command', (['"""list"""'], {}), "('list')\n", (3512, 3520), False, 'import click\n'), ((602, 638), 'gemstone_admin.structs.Configuration.from_file', 'Configuration.from_file', (['CONFIG_FILE'], {}), '(CONFIG_FILE)\n', (625, 638), False, 'from gemstone_admin.structs import Service, Configuration\n'), ((2180, 2216), 'gemstone_admin.structs.Service', 'Service', (['module_name', 'install_source'], {}), '(module_name, install_source)\n', (2187, 2216), False, 'from gemstone_admin.structs import Service, Configuration\n'), ((2880, 2992), 'tabulate.tabulate', 'tabulate.tabulate', (['service_data'], {'headers': "('Id', 'Name', 'Service module', 'Config module', 'Install source')"}), "(service_data, headers=('Id', 'Name', 'Service module',\n 'Config module', 'Install source'))\n", (2897, 2992), False, 'import tabulate\n'), ((3478, 3495), 'click.echo', 'click.echo', (['value'], {}), '(value)\n', (3488, 3495), False, 'import click\n'), ((3744, 3794), 'tabulate.tabulate', 'tabulate.tabulate', (['items'], {'headers': "['Key', 'Value']"}), "(items, headers=['Key', 'Value'])\n", (3761, 3794), False, 'import tabulate\n'), ((508, 563), 'simplejson.dumps', 'json.dumps', (["{'env': {}, 'installed': {}, 'running': {}}"], {}), "({'env': {}, 'installed': {}, 'running': {}})\n", (518, 563), True, 'import simplejson as json\n'), ((2296, 2331), 'click.style', 'click.style', (['"""Finished"""'], {'fg': '"""green"""'}), "('Finished', fg='green')\n", (2307, 2331), False, 'import click\n'), ((2362, 2397), 'click.style', 'click.style', (['service.info'], {'fg': '"""red"""'}), "(service.info, fg='red')\n", (2373, 2397), False, 'import click\n'), ((3415, 3458), 'click.style', 'click.style', (['"""Key does not exist"""'], {'fg': '"""red"""'}), "('Key does not exist', fg='red')\n", (3426, 3458), False, 'import click\n')] |
import torch
import torchvision
from torchvision import models
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import os
def upsize(x,scale_factor=2):
#x = F.interpolate(x, size=e.shape[2:], mode='nearest')
x = F.interpolate(x, scale_factor=scale_factor, mode='nearest')
return x
class DecoderBlock(nn.Module):
def __init__(self,
in_channels=512,
out_channels=256,
kernel_size=3,
is_deconv=False,
):
super().__init__()
# B, C, H, W -> B, C/4, H, W
self.conv1 = nn.Conv2d(in_channels, out_channels // 2, kernel_size=3, stride=1, padding=1, bias=False)
self.norm1 = nn.BatchNorm2d(out_channels // 2)
self.relu1 = nn.ReLU(inplace=True)
# B, C/4, H, W -> B, C/4, H, W
'''
if is_deconv == True:
self.deconv2 = nn.ConvTranspose2d(in_channels // 4,
in_channels // 4,
3,
stride=2,
padding=1,
output_padding=conv_padding,bias=False)
else:
self.deconv2 = nn.Upsample(scale_factor=2,**up_kwargs)
'''
self.conv2 = nn.Conv2d(out_channels // 2, out_channels // 2, kernel_size=3, stride=1, padding=1, bias=False)
self.norm2 = nn.BatchNorm2d(out_channels // 2)
self.relu2 = nn.ReLU(inplace=True)
# B, C/4, H, W -> B, C, H, W
self.conv3 = nn.Conv2d(out_channels // 2, out_channels , kernel_size=3, stride=1, padding=1, bias=False)
self.norm3 = nn.BatchNorm2d( out_channels)
self.relu3 = nn.ReLU(inplace=True)
def forward(self, x):
x = torch.cat(x,1)
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.norm2(x)
x = self.relu2(x)
x = self.conv3(x)
x = self.norm3(x)
x = self.relu3(x)
return x
class BasicConv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, bias=False) # verify bias false
self.bn = nn.BatchNorm2d(out_planes,
eps=0.001, # value found in tensorflow
momentum=0.1, # default pytorch value
affine=True)
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class ResNet34UnetPlus(nn.Module):
def __init__(self,
num_channels=1,
num_class=1,
is_deconv=False,
decoder_kernel_size=3,
):
super().__init__()
filters = [64, 128, 256, 512]
resnet = models.resnet34(pretrained=False)
self.base_size=512
self.crop_size=512
self._up_kwargs={'mode': 'bilinear', 'align_corners': True}
self.mix = nn.Parameter(torch.FloatTensor(5))
self.mix.data.fill_(1)
# self.firstconv = resnet.conv1
# assert num_channels == 3, "num channels not used now. to use changle first conv layer to support num channels other then 3"
# try to use 8-channels as first input
if num_channels == 3:
self.firstconv = resnet.conv1
else:
self.firstconv = nn.Conv2d(num_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3),bias=False)
self.firstbn = resnet.bn1
self.firstrelu = resnet.relu
self.firstmaxpool = resnet.maxpool
self.encoder1 = resnet.layer1
self.encoder2 = resnet.layer2
self.encoder3 = resnet.layer3
self.encoder4 = resnet.layer4
# Decoder
self.decoder0_1 = DecoderBlock(in_channels=64+64,
out_channels=64,
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder1_1 = DecoderBlock(in_channels=128+64,
out_channels=64,
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder0_2 = DecoderBlock(in_channels=64+64+64,
out_channels=64,
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder2_1 = DecoderBlock(in_channels=128+256,
out_channels=128,
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder1_2 = DecoderBlock(in_channels=64+64+128,
out_channels=128,
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder0_3 = DecoderBlock(in_channels=64+64+64+128,
out_channels=128,
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder3_1 = DecoderBlock(in_channels=512+256,
out_channels=256,
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder2_2 = DecoderBlock(in_channels=128+128+256,
out_channels=256,
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder1_3 = DecoderBlock(in_channels=64+64+128+256,
out_channels=256,
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.decoder0_4 = DecoderBlock(in_channels=64+64+64+128+256,
out_channels=256,
kernel_size=decoder_kernel_size,
is_deconv=is_deconv)
self.logit1 = nn.Conv2d( 64,num_class, kernel_size=1)
self.logit2 = nn.Conv2d( 64,num_class, kernel_size=1)
self.logit3 = nn.Conv2d(128,num_class, kernel_size=1)
self.logit4 = nn.Conv2d(256,num_class, kernel_size=1)
def require_encoder_grad(self, requires_grad):
blocks = [self.firstconv,
self.encoder1,
self.encoder2,
self.encoder3,
self.encoder4]
for block in blocks:
for p in block.parameters():
p.requires_grad = requires_grad
def forward(self, x):
_,_, H,W = x.shape
# stem
x = self.firstconv(x) #subsample
x = self.firstbn(x)
x_ = self.firstrelu(x)
# Encoder
x = self.firstmaxpool(x_) #64
e1 = self.encoder1(x) #64
e2 = self.encoder2(e1) #128
e3 = self.encoder3(e2) #256
e4 = self.encoder4(e3) #512
#--------Unet Plus Plus Decoder----------------------------------------------
x0_0 = x_
x1_0 = e1
print(x0_0.shape, x1_0.shape) #64 128 128
x0_1 = self.decoder0_1([x0_0, upsize(x1_0)]) # 256 256
x2_0 = e2
x1_1 = self.decoder1_1([x1_0, upsize(x2_0)])
print(x0_0.shape, x0_1.shape, x1_1.shape)
x0_2 = self.decoder0_2([x0_0, x0_1, upsize(x1_1)])
x3_0 = e3
x2_1 = self.decoder2_1([x2_0, upsize(x3_0)])
x1_2 = self.decoder1_2([x1_0, x1_1, upsize(x2_1)])
x0_3 = self.decoder0_3([x0_0, x0_1, x0_2, upsize(x1_2)])
x4_0 = e4
x3_1 = self.decoder3_1([x3_0, upsize(x4_0)])
x2_2 = self.decoder2_2([x2_0, x2_1, upsize(x3_1)])
x1_3 = self.decoder1_3([x1_0, x1_1, x1_2, upsize(x2_2)])
x0_4 = self.decoder0_4([x0_0, x0_1, x0_2, x0_3, upsize(x1_3)])
logit1 = self.logit1(x0_1)
logit2 = self.logit2(x0_2)
logit3 = self.logit3(x0_3)
logit4 = self.logit4(x0_4)
print(self.mix)
logit = self.mix[1]*logit1 + self.mix[2]*logit2 + self.mix[3]*logit3 + self.mix[4]*logit4
logit = F.interpolate(logit, size=(H,W), mode='bilinear', align_corners=False)
return logit
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torchvision.models.resnet34",
"torch.nn.functional.interpolate",
"torch.FloatTensor",
"torch.cat"
] | [((255, 314), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': 'scale_factor', 'mode': '"""nearest"""'}), "(x, scale_factor=scale_factor, mode='nearest')\n", (268, 314), True, 'import torch.nn.functional as F\n'), ((625, 719), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(out_channels // 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(in_channels, out_channels // 2, kernel_size=3, stride=1, padding=\n 1, bias=False)\n', (634, 719), True, 'import torch.nn as nn\n'), ((736, 769), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(out_channels // 2)'], {}), '(out_channels // 2)\n', (750, 769), True, 'import torch.nn as nn\n'), ((791, 812), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (798, 812), True, 'import torch.nn as nn\n'), ((1386, 1485), 'torch.nn.Conv2d', 'nn.Conv2d', (['(out_channels // 2)', '(out_channels // 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(out_channels // 2, out_channels // 2, kernel_size=3, stride=1,\n padding=1, bias=False)\n', (1395, 1485), True, 'import torch.nn as nn\n'), ((1503, 1536), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(out_channels // 2)'], {}), '(out_channels // 2)\n', (1517, 1536), True, 'import torch.nn as nn\n'), ((1558, 1579), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1565, 1579), True, 'import torch.nn as nn\n'), ((1640, 1735), 'torch.nn.Conv2d', 'nn.Conv2d', (['(out_channels // 2)', 'out_channels'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(out_channels // 2, out_channels, kernel_size=3, stride=1, padding\n =1, bias=False)\n', (1649, 1735), True, 'import torch.nn as nn\n'), ((1753, 1781), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (1767, 1781), True, 'import torch.nn as nn\n'), ((1804, 1825), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1811, 1825), True, 'import torch.nn as nn\n'), ((1866, 1881), 'torch.cat', 'torch.cat', (['x', '(1)'], {}), '(x, 1)\n', (1875, 1881), False, 'import torch\n'), ((2309, 2414), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n padding=padding, bias=False)\n', (2318, 2414), True, 'import torch.nn as nn\n'), ((2510, 2574), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_planes'], {'eps': '(0.001)', 'momentum': '(0.1)', 'affine': '(True)'}), '(out_planes, eps=0.001, momentum=0.1, affine=True)\n', (2524, 2574), True, 'import torch.nn as nn\n'), ((2748, 2770), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (2755, 2770), True, 'import torch.nn as nn\n'), ((3192, 3225), 'torchvision.models.resnet34', 'models.resnet34', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (3207, 3225), False, 'from torchvision import models\n'), ((6597, 6636), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', 'num_class'], {'kernel_size': '(1)'}), '(64, num_class, kernel_size=1)\n', (6606, 6636), True, 'import torch.nn as nn\n'), ((6659, 6698), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', 'num_class'], {'kernel_size': '(1)'}), '(64, num_class, kernel_size=1)\n', (6668, 6698), True, 'import torch.nn as nn\n'), ((6721, 6761), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', 'num_class'], {'kernel_size': '(1)'}), '(128, num_class, kernel_size=1)\n', (6730, 6761), True, 'import torch.nn as nn\n'), ((6783, 6823), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', 'num_class'], {'kernel_size': '(1)'}), '(256, num_class, kernel_size=1)\n', (6792, 6823), True, 'import torch.nn as nn\n'), ((8700, 8771), 'torch.nn.functional.interpolate', 'F.interpolate', (['logit'], {'size': '(H, W)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(logit, size=(H, W), mode='bilinear', align_corners=False)\n", (8713, 8771), True, 'import torch.nn.functional as F\n'), ((3398, 3418), 'torch.FloatTensor', 'torch.FloatTensor', (['(5)'], {}), '(5)\n', (3415, 3418), False, 'import torch\n'), ((3796, 3891), 'torch.nn.Conv2d', 'nn.Conv2d', (['num_channels', '(64)'], {'kernel_size': '(7, 7)', 'stride': '(2, 2)', 'padding': '(3, 3)', 'bias': '(False)'}), '(num_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, \n 3), bias=False)\n', (3805, 3891), True, 'import torch.nn as nn\n')] |
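# Shape-check sketch for the network above (random CPU input; the channel
# count, class count, and image size are arbitrary illustration choices).
if __name__ == "__main__":
    net = ResNet34UnetPlus(num_channels=3, num_class=1)
    dummy = torch.randn(1, 3, 128, 128)
    out = net(dummy)
    print(out.shape)  # expected torch.Size([1, 1, 128, 128])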
"""Solving the standard 1D wave equation using the spacetime CG-CG method.
Equation:
u'' - uxx = f, in [0,T]x[0,1],
u(0,x) = u0(x), u'(0,x) = u1(x), for x in [0,1],
u(t,0) = u(t,1) = 0, for t in [0,T].
Mesh:
Space-time mesh for [0,T]x[0,1]
Weak formulation: find u in H1([0,T], H01[0,1]) with the initial condition,
- (u', v') + (ux, vx) = (f, v) + [u1, v(0)]
for all v in H1([0,T], H01[0,1]) satisfying the terminal boundary condition
v(T,x)=0.
"""
from fenics import (Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point,
FunctionSpace, MeshFunction, TestFunction, TrialFunction,
Function, LUSolver, DirichletBC, PETScVector, PETScMatrix,
Constant, MeshEditor,
near, as_backend_type, ds, dot, grad, solve, assemble, dx)
import numpy as np
from petsc4py import PETSc
def mesh_randomizer_2d(mesh, percentage, preserve_boundary=True):
"""
Randomly perturb a given mesh.
Args:
mesh: Input mesh.
percentage: Maximum perturbation in percentage of mesh.hmin().
preserve_boundary: Whether to move the vertices on the boundary.
Returns:
rmesh: The perturbed mesh.
"""
# Generate a deep copy of the mesh
rmesh = Mesh(mesh)
meshsize = rmesh.hmin()
# Randomly perturbed the mesh
radius = np.random.rand(rmesh.num_vertices()) * percentage * meshsize
theta = np.random.rand(rmesh.num_vertices()) * 2.0 * np.pi
deltax = np.zeros([rmesh.num_vertices(), 2])
deltax[:, 0] = (radius * np.sin(theta)).transpose()
deltax[:, 1] = (radius * np.cos(theta)).transpose()
# What to do with the boundary vertices
if preserve_boundary:
# Exterior means global boundary
boundary_mesh = BoundaryMesh(rmesh, "exterior")
# entity_map contains the indices of vertices on the boundary
boundary_vertices = boundary_mesh.entity_map(0).array()
deltax[boundary_vertices] = 0.0
rmesh.coordinates()[:] = rmesh.coordinates() + deltax
return rmesh
def get_dof_by_criterion(space, criterion):
""" Return dofs with coordinates satisfying a given condition.
Args:
space: The function space.
criterion: A boolean function which takes a coordinate (numpy array).
Outputs:
dof_no: A list of global indices of dofs where criterion is True.
dof_coor: A list corresponds to the coordinates of dofs in dof_no.
"""
gdim = space.mesh().geometry().dim()
dof_coors = space.tabulate_dof_coordinates().reshape((-1, gdim))
return list(zip(*[(i, coor)
for (i, coor) in enumerate(dof_coors)
if criterion(coor)]))
class TemporalSlice(SubDomain):
"""A temporal slice of the space-time domain."""
def __init__(self, time):
self.time = time
SubDomain.__init__(self)
def inside(self, coor, on_boundary):
return near(coor[0], self.time)
class Boundary(SubDomain):
"""Spatial boundary of the space-time domain."""
def __init__(self, left, right):
self.left = left
self.right = right
SubDomain.__init__(self)
def inside(self, pos, on_boundary):
return (near(pos[1], self.left)) or (near(pos[1], self.right))
class SpaceTimeDomain:
"""(1+1) space-time domain [t0, t1] x [x0, x1]."""
def __init__(self, t0, t1, x0, x1):
self.t0 = t0
self.t1 = t1
self.x0 = x0
self.x1 = x1
def get_initial_slice(self):
"""Generate temporal domains for marking mesh."""
return TemporalSlice(self.t0)
def get_terminal_slice(self):
"""Generate temporal domains for marking mesh."""
return TemporalSlice(self.t1)
def get_spatial_boundary(self):
"""Generate spatial domains for marking mesh."""
return Boundary(self.x0, self.x1)
def get_uniform_mesh(self, temporal_nodes, spatial_nodes):
"""Generate uniform mesh of the spacetime."""
return RectangleMesh(Point(self.t0, self.x0),
Point(self.t1, self.x1),
temporal_nodes, spatial_nodes)
def apply_time_boundary_conditions(domain, V, u0, A, b):
"""Apply the time slice boundary conditions by hand.
Args:
domain: Space-time domain.
V: Function space.
u0: Initial data.
A: The stiffness matrix.
b: The right-hand side.
Outputs:
A: The new stiffness matrix with the boundary conditions.
b: The new right-hand side with the boundary conditions.
"""
# Export matrices to PETSc
A = as_backend_type(A).mat()
b = as_backend_type(b).vec()
# Apply terminal boundary condition on v by zeroing the corresponding
# matrix rows. The dof indices are saved for later.
def on_terminal_slice(x):
return domain.get_terminal_slice().inside(x, True) \
and (not domain.get_spatial_boundary().inside(x, True))
(rows_to_zero, _) = get_dof_by_criterion(V, on_terminal_slice)
A.zeroRows(rows_to_zero, diag=0)
# Apply initial boundary condition on u
def on_initial_slice(x):
return domain.get_initial_slice().inside(x, True) \
and (not domain.get_spatial_boundary().inside(x, True))
(dof_no, dof_coor) = get_dof_by_criterion(V, on_initial_slice)
# Update the matrices
A.setOption(PETSc.Mat.Option.NEW_NONZERO_LOCATION_ERR, 0)
for (i, k) in enumerate(dof_no):
j = rows_to_zero[i]
A[j, k] = 1.0
b[j] = u0(dof_coor[i])
A.assemble()
b.assemble()
# put petsc4py matrix back to fenics
A = PETScMatrix(A)
b = PETScVector(b)
return (A, b)
def solve_wave_equation(u0, u1, u_boundary, f, domain, mesh, degree):
"""Solving the wave equation using CG-CG method.
Args:
u0: Initial data.
u1: Initial velocity.
u_boundary: Dirichlet boundary condition.
f: Right-hand side.
domain: Space-time domain.
mesh: Computational mesh.
degree: CG(degree) will be used as the finite element.
Outputs:
uh: Numerical solution.
"""
# Element
V = FunctionSpace(mesh, "CG", degree)
# Measures on the initial and terminal slice
mask = MeshFunction("size_t", mesh, mesh.topology().dim() - 1, 0)
domain.get_initial_slice().mark(mask, 1)
ends = ds(subdomain_data=mask)
# Form
g = Constant(((-1.0, 0.0), (0.0, 1.0)))
u = TrialFunction(V)
v = TestFunction(V)
a = dot(grad(v), dot(g, grad(u))) * dx
L = f * v * dx + u1 * v * ends(1)
# Assembled matrices
A = assemble(a, keep_diagonal=True)
b = assemble(L, keep_diagonal=True)
# Spatial boundary condition
bc = DirichletBC(V, u_boundary, domain.get_spatial_boundary())
bc.apply(A, b)
# Temporal boundary conditions (by hand)
(A, b) = apply_time_boundary_conditions(domain, V, u0, A, b)
# Solve
solver = LUSolver()
solver.set_operator(A)
uh = Function(V)
solver.solve(uh.vector(), b)
return uh
def unit_mesh(ht, hx):
editor = MeshEditor()
mesh = Mesh()
editor.open(mesh, "triangle", 2, 2)
editor.init_vertices(7)
editor.add_vertex(0, np.array([0.0, 0.0]))
editor.add_vertex(1, np.array([ht / 2.0, 0.0]))
editor.add_vertex(2, np.array([0.0, hx / 2.0]))
editor.add_vertex(3, np.array([ht / 2.0, hx / 2.0]))
editor.add_vertex(4, np.array([ht, hx / 2.0]))
editor.add_vertex(5, np.array([ht / 2.0, hx]))
editor.add_vertex(6, np.array([ht, hx]))
editor.init_cells(6)
editor.add_cell(0, np.array([0, 1, 3], dtype=np.uintp))
editor.add_cell(1, np.array([0, 2, 3], dtype=np.uintp))
editor.add_cell(2, np.array([1, 3, 4], dtype=np.uintp))
editor.add_cell(3, np.array([2, 3, 5], dtype=np.uintp))
editor.add_cell(4, np.array([3, 4, 6], dtype=np.uintp))
editor.add_cell(5, np.array([3, 5, 6], dtype=np.uintp))
editor.close()
mesh.order()
return mesh
| [
"fenics.Point",
"fenics.TrialFunction",
"fenics.MeshEditor",
"numpy.array",
"numpy.sin",
"fenics.near",
"fenics.as_backend_type",
"fenics.Constant",
"fenics.ds",
"fenics.FunctionSpace",
"fenics.assemble",
"fenics.Mesh",
"fenics.TestFunction",
"fenics.PETScMatrix",
"fenics.Function",
"numpy.cos",
"fenics.BoundaryMesh",
"fenics.LUSolver",
"fenics.SubDomain.__init__",
"fenics.grad",
"fenics.PETScVector"
] | [((1279, 1289), 'fenics.Mesh', 'Mesh', (['mesh'], {}), '(mesh)\n', (1283, 1289), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((5659, 5673), 'fenics.PETScMatrix', 'PETScMatrix', (['A'], {}), '(A)\n', (5670, 5673), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((5682, 5696), 'fenics.PETScVector', 'PETScVector', (['b'], {}), '(b)\n', (5693, 5696), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((6191, 6224), 'fenics.FunctionSpace', 'FunctionSpace', (['mesh', '"""CG"""', 'degree'], {}), "(mesh, 'CG', degree)\n", (6204, 6224), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((6400, 6423), 'fenics.ds', 'ds', ([], {'subdomain_data': 'mask'}), '(subdomain_data=mask)\n', (6402, 6423), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((6443, 6478), 'fenics.Constant', 'Constant', (['((-1.0, 0.0), (0.0, 1.0))'], {}), '(((-1.0, 0.0), (0.0, 1.0)))\n', (6451, 6478), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((6487, 6503), 'fenics.TrialFunction', 'TrialFunction', (['V'], {}), '(V)\n', (6500, 6503), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((6512, 6527), 'fenics.TestFunction', 'TestFunction', (['V'], {}), '(V)\n', (6524, 6527), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((6642, 6673), 'fenics.assemble', 'assemble', (['a'], {'keep_diagonal': '(True)'}), '(a, keep_diagonal=True)\n', (6650, 6673), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((6682, 6713), 'fenics.assemble', 'assemble', (['L'], {'keep_diagonal': '(True)'}), '(L, keep_diagonal=True)\n', (6690, 6713), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((6968, 6978), 'fenics.LUSolver', 'LUSolver', ([], {}), '()\n', (6976, 6978), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((7015, 7026), 'fenics.Function', 'Function', (['V'], {}), '(V)\n', (7023, 7026), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((7112, 7124), 'fenics.MeshEditor', 'MeshEditor', ([], {}), '()\n', (7122, 7124), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((7136, 7142), 'fenics.Mesh', 'Mesh', ([], {}), '()\n', (7140, 7142), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((1785, 1816), 'fenics.BoundaryMesh', 'BoundaryMesh', (['rmesh', '"""exterior"""'], {}), "(rmesh, 'exterior')\n", (1797, 1816), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((2870, 2894), 'fenics.SubDomain.__init__', 'SubDomain.__init__', (['self'], {}), '(self)\n', (2888, 2894), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((2952, 2976), 'fenics.near', 'near', (['coor[0]', 'self.time'], {}), '(coor[0], self.time)\n', (2956, 2976), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((3156, 3180), 'fenics.SubDomain.__init__', 'SubDomain.__init__', (['self'], {}), '(self)\n', (3174, 3180), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((7236, 7256), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (7244, 7256), True, 'import numpy as np\n'), ((7283, 7308), 'numpy.array', 'np.array', (['[ht / 2.0, 0.0]'], {}), '([ht / 2.0, 0.0])\n', (7291, 7308), True, 'import numpy as np\n'), ((7335, 7360), 'numpy.array', 'np.array', (['[0.0, hx / 2.0]'], {}), '([0.0, hx / 2.0])\n', (7343, 7360), True, 'import numpy as np\n'), ((7387, 7417), 'numpy.array', 'np.array', (['[ht / 2.0, hx / 2.0]'], {}), '([ht / 2.0, hx / 2.0])\n', (7395, 7417), True, 'import numpy as np\n'), ((7444, 7468), 'numpy.array', 'np.array', (['[ht, hx / 2.0]'], {}), '([ht, hx / 2.0])\n', (7452, 7468), True, 'import numpy as np\n'), ((7495, 7519), 'numpy.array', 'np.array', (['[ht / 2.0, hx]'], {}), '([ht / 2.0, hx])\n', (7503, 7519), True, 'import numpy as np\n'), ((7546, 7564), 'numpy.array', 'np.array', (['[ht, hx]'], {}), '([ht, hx])\n', (7554, 7564), True, 'import numpy as np\n'), ((7614, 7649), 'numpy.array', 'np.array', (['[0, 1, 3]'], {'dtype': 'np.uintp'}), '([0, 1, 3], dtype=np.uintp)\n', (7622, 7649), True, 'import numpy as np\n'), ((7674, 7709), 'numpy.array', 'np.array', (['[0, 2, 3]'], {'dtype': 'np.uintp'}), '([0, 2, 3], dtype=np.uintp)\n', (7682, 7709), True, 'import numpy as np\n'), ((7734, 7769), 'numpy.array', 'np.array', (['[1, 3, 4]'], {'dtype': 'np.uintp'}), '([1, 3, 4], dtype=np.uintp)\n', (7742, 7769), True, 'import numpy as np\n'), ((7794, 7829), 'numpy.array', 'np.array', (['[2, 3, 5]'], {'dtype': 'np.uintp'}), '([2, 3, 5], dtype=np.uintp)\n', (7802, 7829), True, 'import numpy as np\n'), ((7854, 7889), 'numpy.array', 'np.array', (['[3, 4, 6]'], {'dtype': 'np.uintp'}), '([3, 4, 6], dtype=np.uintp)\n', (7862, 7889), True, 'import numpy as np\n'), ((7914, 7949), 'numpy.array', 'np.array', (['[3, 5, 6]'], {'dtype': 'np.uintp'}), '([3, 5, 6], dtype=np.uintp)\n', (7922, 7949), True, 'import numpy as np\n'), ((3238, 3261), 'fenics.near', 'near', (['pos[1]', 'self.left'], {}), '(pos[1], self.left)\n', (3242, 3261), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((3267, 3291), 'fenics.near', 'near', (['pos[1]', 'self.right'], {}), '(pos[1], self.right)\n', (3271, 3291), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((4041, 4064), 'fenics.Point', 'Point', (['self.t0', 'self.x0'], {}), '(self.t0, self.x0)\n', (4046, 4064), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((4095, 4118), 'fenics.Point', 'Point', (['self.t1', 'self.x1'], {}), '(self.t1, self.x1)\n', (4100, 4118), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((4650, 4668), 'fenics.as_backend_type', 'as_backend_type', (['A'], {}), '(A)\n', (4665, 4668), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((4683, 4701), 'fenics.as_backend_type', 'as_backend_type', (['b'], {}), '(b)\n', (4698, 4701), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((6540, 6547), 'fenics.grad', 'grad', (['v'], {}), '(v)\n', (6544, 6547), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n'), ((1567, 1580), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1573, 1580), True, 'import numpy as np\n'), ((1623, 1636), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1629, 1636), True, 'import numpy as np\n'), ((6556, 6563), 'fenics.grad', 'grad', (['u'], {}), '(u)\n', (6560, 6563), False, 'from fenics import Mesh, BoundaryMesh, SubDomain, RectangleMesh, Point, FunctionSpace, MeshFunction, TestFunction, TrialFunction, Function, LUSolver, DirichletBC, PETScVector, PETScMatrix, Constant, MeshEditor, near, as_backend_type, ds, dot, grad, solve, assemble, dx\n')] |
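# Hypothetical driver for the solver above. The initial/boundary data and
# mesh resolution are arbitrary illustration values, not from the source;
# Expression is a standard FEniCS class not imported in the file header.
if __name__ == "__main__":
    from fenics import Expression
    dom = SpaceTimeDomain(0.0, 1.0, 0.0, 1.0)
    st_mesh = dom.get_uniform_mesh(16, 16)
    u0 = Expression("sin(pi*x[1])", degree=2)  # u(0, x)
    u1 = Constant(0.0)                         # u'(0, x)
    f = Constant(0.0)                          # right-hand side
    uh = solve_wave_equation(u0, u1, Constant(0.0), f, dom, st_mesh, degree=1)
    print(uh.vector().norm("l2"))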
import random

# Hash table with 100 slots; '-' marks an empty slot.
space_list = ['-' for x in range(100)]
hash_list = [random.randint(0, 1000) for x in range(100)]
for hash_data in hash_list:
    hash_mod = hash_data % 100  # home slot for this value
    if space_list[hash_mod] == '-':
        space_list[hash_mod] = hash_data
    else:
        # Collision: linear probing, wrapping around the end of the table.
        for x in range(100):
            if space_list[(hash_mod + x) % 100] == '-':
                space_list[(hash_mod + x) % 100] = hash_data
                break
for x in space_list:
    print(x)
| [
"random.randint"
] | [((66, 89), 'random.randint', 'random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (80, 89), False, 'import random\n')] |
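# Companion lookup sketch for the linear-probing insert above: scan from
# the home slot until the value or an empty slot ('-') is found. Without
# deletions, hitting '-' means the value was never inserted.
def probe_lookup(space_list, hash_data):
    hash_mod = hash_data % 100
    for x in range(100):
        slot = space_list[(hash_mod + x) % 100]
        if slot == hash_data:
            return (hash_mod + x) % 100  # index where the value lives
        if slot == '-':
            return None  # empty slot reached: value absent
    return None  # table full and value absent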
import torch
import numpy as np

def actual_acc(preds, targs):
    # Reduce logits to predicted class indices.
    preds = torch.max(preds, dim=1)[1]
    corr = 0
    tot = 0
    # Score accuracy per chunk of 50 samples; a chunk counts as correct
    # when at least half of its predictions match the targets.
    for j in np.arange(0, len(preds), 50):
        acc1 = (preds[j:j + 50] == targs[j:j + 50]).float().mean()
        if acc1 >= 0.5:
            corr += 1
        tot += 1
    return corr / tot | [
"torch.max"
] | [((56, 79), 'torch.max', 'torch.max', (['preds'], {'dim': '(1)'}), '(preds, dim=1)\n', (65, 79), False, 'import torch\n')] |
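# Smoke test for the chunked accuracy above: random 5-class logits for 200
# samples (four chunks of 50); the sizes are arbitrary illustration values.
if __name__ == "__main__":
    logits = torch.randn(200, 5)
    targs = torch.randint(0, 5, (200,))
    print(actual_acc(logits, targs))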
#pylint: skip-file
import sys
import os
# This is the directory of the source code that the web app will run from
sys.path.append("/home/phaesler/src/datacube/wms")
# The location of the datcube config file.
os.environ.setdefault("DATACUBE_CONFIG_PATH", "/home/phaesler/.datacube.conf.local")
from datacube_wms.ogc import app
application = app
| [
"os.environ.setdefault",
"sys.path.append"
] | [((115, 165), 'sys.path.append', 'sys.path.append', (['"""/home/phaesler/src/datacube/wms"""'], {}), "('/home/phaesler/src/datacube/wms')\n", (130, 165), False, 'import sys\n'), ((210, 298), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DATACUBE_CONFIG_PATH"""', '"""/home/phaesler/.datacube.conf.local"""'], {}), "('DATACUBE_CONFIG_PATH',\n '/home/phaesler/.datacube.conf.local')\n", (231, 298), False, 'import os\n')] |
import torch
import random
def get_align_matrix(aligned_ids, sparse=False, device=None, dtype=torch.float32):
"""
Get aligned matrix for feature alignment in sentence embedding
:param aligned_ids: list, aligned_ids[k] means original index of k-th token
:param sparse: whether to return sparse matrix
:param device: device of returned align matrix
:param dtype: dtype of returned align matrix
:return: align_matrix: torch.FloatTensor, shape: (L, L')
Example:
>> aligned_ids = [0, 0, 1, 2, 2, 2]
>> get_align_matrix(aligned_ids)
tensor([[0.5000, 0.5000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 1.0000, 0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.0000, 0.3333, 0.3333, 0.3333]])
"""
l0 = max(aligned_ids) + 1
l1 = len(aligned_ids)
if sparse:
raise NotImplementedError
else:
align_matrix = torch.zeros((l0, l1), dtype=dtype, device=device)
align_matrix[aligned_ids, torch.arange(l1)] = 1
align_matrix = align_matrix / align_matrix.sum(dim=1, keepdim=True)
return align_matrix
def get_all_ngrams(words):
"""
Get all n-grams of words
:param words: list of str
:return: ngrams, list of (list of str)
"""
ngrams = []
N = len(words)
for n in range(1, N + 1):
for i in range(0, N - n + 1):
ngrams.append([words[j] for j in range(i, i + n)])
return ngrams
def random_word_with_token_ids(token_ids, tokenizer):
"""
Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
:param token_ids: list of int, list of token id.
:param tokenizer: Tokenizer, object used for tokenization (we need it's vocab here)
:return: (list of str, list of int), masked tokens and related labels for LM prediction
"""
output_label = []
mask_id = tokenizer.convert_tokens_to_ids(['[MASK]'])[0]
for i, token_id in enumerate(token_ids):
prob = random.random()
# mask token with 15% probability
if prob < 0.15:
prob /= 0.15
# 80% randomly change token to mask token
if prob < 0.8:
token_ids[i] = mask_id
# 10% randomly change token to random token
elif prob < 0.9:
token_ids[i] = random.choice(list(tokenizer.vocab.items()))[1]
# -> rest 10% randomly keep current token
# append current token to output (we will predict these later)
output_label.append(token_id)
else:
# no masking token (will be ignored by loss function later)
output_label.append(-1)
return token_ids, output_label
| [
"random.random",
"torch.zeros",
"torch.arange"
] | [((904, 953), 'torch.zeros', 'torch.zeros', (['(l0, l1)'], {'dtype': 'dtype', 'device': 'device'}), '((l0, l1), dtype=dtype, device=device)\n', (915, 953), False, 'import torch\n'), ((1997, 2012), 'random.random', 'random.random', ([], {}), '()\n', (2010, 2012), False, 'import random\n'), ((988, 1004), 'torch.arange', 'torch.arange', (['l1'], {}), '(l1)\n', (1000, 1004), False, 'import torch\n')] |
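# Worked example reproducing the docstring of get_align_matrix above.
if __name__ == "__main__":
    print(get_align_matrix([0, 0, 1, 2, 2, 2]))
    # tensor([[0.5000, 0.5000, 0.0000, 0.0000, 0.0000, 0.0000],
    #         [0.0000, 0.0000, 1.0000, 0.0000, 0.0000, 0.0000],
    #         [0.0000, 0.0000, 0.0000, 0.3333, 0.3333, 0.3333]])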
import tkinter as tk
from main_window import MainWindow
def main() -> None:
root = tk.Tk()
main_window = MainWindow(root)
main_window.mainloop()
if __name__ == '__main__':
main()
| [
"tkinter.Tk",
"main_window.MainWindow"
] | [((89, 96), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (94, 96), True, 'import tkinter as tk\n'), ((115, 131), 'main_window.MainWindow', 'MainWindow', (['root'], {}), '(root)\n', (125, 131), False, 'from main_window import MainWindow\n')] |
import unittest
import sys
sys.path.append('LeetCode/_1051_1100')
sys.path.append('LeetCode.Test')
from _1096_BraceExpansionII import Solution
import AssertHelper
class Test_1096_BraceExpansionII(unittest.TestCase):
def test_braceExpansionII_1(self):
solution = Solution()
result = solution.braceExpansionII("{a,b}{c,{d,e}}")
self.assertSequenceEqual(["ac","ad","ae","bc","bd","be"], result)
def test_braceExpansionII_2(self):
solution = Solution()
result = solution.braceExpansionII("{{a,z},a{b,c},{ab,z}}")
self.assertEqual(["a","ab","ac","z"], result)
| [
"_1096_BraceExpansionII.Solution",
"sys.path.append"
] | [((28, 66), 'sys.path.append', 'sys.path.append', (['"""LeetCode/_1051_1100"""'], {}), "('LeetCode/_1051_1100')\n", (43, 66), False, 'import sys\n'), ((67, 99), 'sys.path.append', 'sys.path.append', (['"""LeetCode.Test"""'], {}), "('LeetCode.Test')\n", (82, 99), False, 'import sys\n'), ((277, 287), '_1096_BraceExpansionII.Solution', 'Solution', ([], {}), '()\n', (285, 287), False, 'from _1096_BraceExpansionII import Solution\n'), ((482, 492), '_1096_BraceExpansionII.Solution', 'Solution', ([], {}), '()\n', (490, 492), False, 'from _1096_BraceExpansionII import Solution\n')] |
import torch
import torch.nn as nn
import torch.optim as optim
from models import lstm as lstm
from data import dataloader
from tqdm import tqdm
args = None
def get_accuracy(logits, labels):
rounded_preds = torch.round(torch.sigmoid(logits))
correct = (rounded_preds == labels).float()
acc = correct.sum()/len(correct)
return acc
def train(model, data, optimizer, criterion):
global args
running_loss = 0.0
total_loss = 0.0
total_acc = 0.0
model.train()
i=0
for batch in tqdm(data):
optimizer.zero_grad()
pred = model(batch.text).squeeze(1)
loss = criterion(pred, batch.label)
acc = get_accuracy(pred, batch.label)
loss.backward()
optimizer.step()
running_loss += loss.item()
total_loss += loss.item()
total_acc += acc.item()
if i % args.log_interval == args.log_interval-1:
print(f"Running Loss : {running_loss/args.log_interval}")
running_loss = 0.0
i+=1
total_loss /= len(data)
total_acc /= len(data)
print(f"Epoch Loss: {total_loss}, Epoch Accuracy: {total_acc}")
return total_loss, total_acc
def validate(model, data, criterion):
model.eval()
total_loss = 0.0
total_acc = 0.0
for i,batch in enumerate(data):
pred = model(batch.text).squeeze(1)
loss = criterion(pred, batch.label)
acc = get_accuracy(pred, batch.label)
total_loss += loss.item()
total_acc += acc.item()
total_loss /= len(data)
total_acc /= len(data)
return total_loss, total_acc
def main(args):
train_loader, val_loader, test_loader, glove_vecs =\
dataloader.get_data(args.batch_size)
if args.checkpoint is not False:
model = torch.load(args.checkpoint)
else:
if args.model == 'lstm':
model = lstm.LSTM(pretrained_emb=glove_vecs)
else:
            raise NotImplementedError
optimizer = optim.Adam(model.parameters())
criterion = nn.BCEWithLogitsLoss()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=1,
verbose=True)
prev_best = -1
for epoch in range(args.epochs):
loss, acc = train(model, train_loader, optimizer, criterion)
vloss, vacc = validate(model, val_loader, criterion)
lr_scheduler.step(vloss)
print(f"Validation Loss: {vloss}, Validation Accuracy: {vacc}")
# checkpoint
if vacc > prev_best:
prev_best = vacc
torch.save(model,
f"models/checkpoints/{args.run_name}-epoch-{epoch}-acc-{vacc}")
print(f"Saving Checkpoint: models/checkpoints/{args.run_name}-epoch-{epoch}-acc-{vacc}")
tloss, tacc = validate(model, test_loader, criterion)
print(f"Test Loss: {tloss}, Test Accuracy: {tacc}")
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('run_name')
parser.add_argument('--model', default='lstm')
parser.add_argument('--batch_size', default=45)
parser.add_argument('--checkpoint', default=False)
parser.add_argument('--epochs', default=100)
parser.add_argument('--log_interval', default=50)
args = parser.parse_args()
main(args) | [
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"argparse.ArgumentParser",
"torch.load",
"tqdm.tqdm",
"torch.sigmoid",
"models.lstm.LSTM",
"torch.cuda.is_available",
"torch.save",
"data.dataloader.get_data",
"torch.nn.BCEWithLogitsLoss"
] | [((523, 533), 'tqdm.tqdm', 'tqdm', (['data'], {}), '(data)\n', (527, 533), False, 'from tqdm import tqdm\n'), ((1711, 1747), 'data.dataloader.get_data', 'dataloader.get_data', (['args.batch_size'], {}), '(args.batch_size)\n', (1730, 1747), False, 'from data import dataloader\n'), ((2046, 2068), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (2066, 2068), True, 'import torch.nn as nn\n'), ((2191, 2264), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'patience': '(1)', 'verbose': '(True)'}), '(optimizer, patience=1, verbose=True)\n', (2227, 2264), True, 'import torch.optim as optim\n'), ((3066, 3091), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3089, 3091), False, 'import argparse\n'), ((226, 247), 'torch.sigmoid', 'torch.sigmoid', (['logits'], {}), '(logits)\n', (239, 247), False, 'import torch\n'), ((1802, 1829), 'torch.load', 'torch.load', (['args.checkpoint'], {}), '(args.checkpoint)\n', (1812, 1829), False, 'import torch\n'), ((1893, 1929), 'models.lstm.LSTM', 'lstm.LSTM', ([], {'pretrained_emb': 'glove_vecs'}), '(pretrained_emb=glove_vecs)\n', (1902, 1929), True, 'from models import lstm as lstm\n'), ((2106, 2131), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2129, 2131), False, 'import torch\n'), ((2689, 2774), 'torch.save', 'torch.save', (['model', 'f"""models/checkpoints/{args.run_name}-epoch-{epoch}-acc-{vacc}"""'], {}), "(model,\n f'models/checkpoints/{args.run_name}-epoch-{epoch}-acc-{vacc}')\n", (2699, 2774), False, 'import torch\n')] |
import pandas as pd
import numpy as np
def read_label_file(label_file):
df = pd.read_csv(label_file)
# --- Define lambda to extract coords in list [ymin, xmin, ymax, xmax]
extract_box = lambda row: [row['y'], row['x'], row['y'] + row['height'], row['x'] + row['width']]
parsed = {}
for n, row in df.iterrows():
# --- Initialize patient entry into parsed
pid = row['patientId']
if pid not in parsed:
parsed[pid] = {
'dicom': '../input/stage_1_train_images/%s.dcm' % pid,
'label': row['Target'],
'boxes': []}
# --- Add box if opacity is present
if parsed[pid]['label'] == 1:
parsed[pid]['boxes'].append(extract_box(row))
for _, val in parsed.items():
val['boxes'] = np.array(val['boxes'])
return parsed
| [
"numpy.array",
"pandas.read_csv"
] | [((83, 106), 'pandas.read_csv', 'pd.read_csv', (['label_file'], {}), '(label_file)\n', (94, 106), True, 'import pandas as pd\n'), ((813, 835), 'numpy.array', 'np.array', (["val['boxes']"], {}), "(val['boxes'])\n", (821, 835), True, 'import numpy as np\n')] |
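# Usage sketch with an in-memory CSV (pd.read_csv accepts file-like
# objects); column names are inferred from the parser above, and the
# values are made up for illustration.
if __name__ == "__main__":
    import io
    demo_csv = io.StringIO(
        "patientId,x,y,width,height,Target\n"
        "p1,10,20,30,40,1\n"
        "p2,,,,,0\n"
    )
    print(read_label_file(demo_csv))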
#!/bin/python3
import os, yaml, subprocess, sys
archive = sys.argv[1]
project_dir = os.path.dirname(os.path.abspath(__file__))
manifest = yaml.safe_load(open('manifest.yml', 'r'))
subprocess.call(project_dir + '/xfce4-shutdown')
for package in manifest['packages']:
manifest_element = manifest['packages'][package]
package_directory = manifest_element['directory']
package_name = manifest_element['package']
if package_directory[0] == '~':
package_directory = package_directory.replace('~', os.environ['HOME'])
print('Deploying ' + package_name)
call = ["rsync", "-rK", archive + "/" + package_name, project_dir + '/configs/' + package_directory]
if 'use_sudo' in manifest_element and manifest_element['use_sudo']:
call.insert(0, 'sudo')
subprocess.call(call)
subprocess.call(project_dir + '/xfce4-startup')
| [
"os.path.abspath",
"subprocess.call"
] | [((179, 227), 'subprocess.call', 'subprocess.call', (["(project_dir + '/xfce4-shutdown')"], {}), "(project_dir + '/xfce4-shutdown')\n", (194, 227), False, 'import os, yaml, subprocess, sys\n'), ((811, 858), 'subprocess.call', 'subprocess.call', (["(project_dir + '/xfce4-startup')"], {}), "(project_dir + '/xfce4-startup')\n", (826, 858), False, 'import os, yaml, subprocess, sys\n'), ((103, 128), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (118, 128), False, 'import os, yaml, subprocess, sys\n'), ((788, 809), 'subprocess.call', 'subprocess.call', (['call'], {}), '(call)\n', (803, 809), False, 'import os, yaml, subprocess, sys\n')] |
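# Hypothetical manifest.yml shape implied by the loop above; the package
# and directory names are illustrative only:
#
# packages:
#   xfce4-config:
#     package: xfce4
#     directory: ~/.config
#     use_sudo: false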
import argparse
import gzip
import json
import subprocess as sp
import sys
# Gene ID translation file. Example usage:
# geneIDMap['CFH'] = 'ENSG00000000971.15'
# geneIDMap['ENSG0000000097'] = 'CFH'
geneIDMapFile = (
"/net/amd/amkwong/browseQTL/v2_data/data/gene.id.symbol.map.json.gz"
)
with gzip.open(geneIDMapFile, "rt") as f:
geneIDMap = json.load(f)
parser = argparse.ArgumentParser(
description="Join credible sets with marginal association"
)
parser.add_argument(
"-a",
"--all",
type=str,
required=True,
help="File containing all cis associations (must be tabixed)",
)
parser.add_argument(
"-c",
"--cred",
type=str,
required=True,
help="File containing credible set (must be tabixed)",
)
parser.add_argument(
"-o",
"--out",
type=str,
required=True,
help="Output file name (to be bgzipped)",
)
parser.add_argument(
"-r", "--region", type=str, required=True, help="Genomic region to query"
)
parser.add_argument(
"--tabix",
type=str,
required=False,
default="tabix",
help="Path to binary tabix",
)
parser.add_argument(
"--bgzip",
type=str,
required=False,
default="bgzip",
help="Path to binary bgzip",
)
args = parser.parse_args()
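# Example invocation (script and file names are illustrative only):
#   python join_credible_sets.py -a all.cis.tsv.gz -c chr1.credible_set.tsv.gz \
#       -o joined.tsv.gz -r 1:29000001-30000000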
# cisf = "/net/amd/amkwong/browseQTL/v2_data/ebi_ge/1/all.EBI.ge.data.chr1.29000001-30000000.tsv.gz"
# credf = "/net/amd/amkwong/browseQTL/v2_data/credible_sets/ge/chr1.ge.credible_set.tsv.gz"
# outf = "/net/1000g/hmkang/data/spot/credible_sets/joined/ge/1/ge.credible_set.joinged.chr1.29000001-30000000.tsv.gz"
# chrom = "1"
# beg = 29000001
# end = 30000000
# tabix = "tabix"
# bgzip = "bgzip"
## Load credible set per each megabase bin
vid2trait2cred = {} # type: ignore
creds = []
with sp.Popen(
"{args.tabix} {args.cred} {args.region}".format(**locals()),
shell=True,
encoding="utf-8",
stdout=sp.PIPE,
).stdout as fh:
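    # Stream only the requested region through tabix instead of loading the whole file.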
for line in fh:
toks = line.rstrip().split("\t")
(
dataset,
tissue,
trait,
vid,
vchr,
vpos,
vref,
valt,
cs_id,
cs_index,
region,
pip,
z,
cs_min_r2,
cs_avg_r2,
cs_size,
posterior_mean,
posterior_sd,
cs_log10bf,
) = toks
## manual change needed here for BLUEPRINT to fix inconsistencies between credible set and all cis
if (dataset == "BLUEPRINT_SE") or (dataset == "BLUEPRINT_PE"):
dataset = "BLUEPRINT"
toks[0] = "BLUEPRINT"
## manual change needed for van_de_Bunt_2015 to fix inconsistencies between credible set and all cis
# if ( dataset == "van_de_Bunt_2015" ):
# dataset = "van_de_Bunt_2018"
# toks[0] = "van_de_Bunt_2018"
## manual change needed for esophagus_gej to fix inconsistencies between credible set and all cis
# if ( ( dataset == "GTEx" ) and ( tissue.startswith("esophagus_gej") or tissue.startswith("esophagusj") ) ):
# tissue = "esophagus_gej"
# toks[1] = "esophagus_gej"
creds.append(toks)
traitID = ":".join([dataset, tissue, trait])
if vid not in vid2trait2cred:
vid2trait2cred[vid] = {}
if traitID in vid2trait2cred[vid]:
raise ValueError("Duplicate {vid} {traitID}".format(**locals()))
# print("Register {vid} {traitID}".format(**locals()), file=sys.stderr)
vid2trait2cred[vid][traitID] = len(creds) - 1
## Read all cis associations and identify the lines matching to the credible set
vid2trait2cis = {} # type: ignore
with sp.Popen(
"{args.tabix} {args.all} {args.region}".format(**locals()),
shell=True,
encoding="utf-8",
stdout=sp.PIPE,
).stdout as fh:
for line in fh:
toks = line.rstrip().split("\t")
(dataset, tissue, trait, vchr, vpos, vref, valt, vid) = toks[0:8]
traitID = ":".join([dataset, tissue, trait])
toks.append(geneIDMap.get(trait.split(".")[0], "Unknown_gene"))
# FYI - order of toks : (dataset, tissue, trait, vchr, vpos, vref, valt, vid, ma_samples, maf, pvalue, beta, se, vtype, ac, an, r2, mol_trait_obj_id, gid, median_tpm, rsid)
if (vid in vid2trait2cred) and (traitID in vid2trait2cred[vid]):
if vid not in vid2trait2cis:
vid2trait2cis[vid] = {}
## ignore the errors of seeing the sample SNP twice
# if ( traitID in vid2trait2cis[vid] ):
# print(vid2trait2cis[vid],file=sys.stderr)
# print(toks,file=sys.stderr)
# raise ValueError("Duplicate cis {vid} {traitID}".format(**locals()))
vid2trait2cis[vid][traitID] = toks
## write joined
with sp.Popen(
"{args.bgzip} -c > {args.out}".format(**locals()),
shell=True,
encoding="utf-8",
stdin=sp.PIPE,
).stdin as wh:
for i in range(len(creds)):
cred = creds[i]
(dataset, tissue, trait, vid) = cred[0:4]
traitID = ":".join([dataset, tissue, trait])
if (vid not in vid2trait2cis) or (traitID not in vid2trait2cis[vid]):
print(
"WARNING: Could not find match for {vid} and {traitID}".format(
**locals()
),
file=sys.stderr,
)
continue
cis = vid2trait2cis[vid][traitID]
if (
(cred[0] != cis[0])
or (cred[1] != cis[1])
or (cred[2] != cis[2])
or (cred[3] != cis[7])
):
print(cred, file=sys.stderr)
print(cis, file=sys.stderr)
raise ValueError("ERROR: Incompatible lines")
wh.write("\t".join(cred))
wh.write("\t")
wh.write("\t".join(cis[8:]))
wh.write("\n")
| [
"json.load",
"argparse.ArgumentParser",
"gzip.open"
] | [((373, 461), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Join credible sets with marginal association"""'}), "(description=\n 'Join credible sets with marginal association')\n", (396, 461), False, 'import argparse\n'), ((297, 327), 'gzip.open', 'gzip.open', (['geneIDMapFile', '"""rt"""'], {}), "(geneIDMapFile, 'rt')\n", (306, 327), False, 'import gzip\n'), ((350, 362), 'json.load', 'json.load', (['f'], {}), '(f)\n', (359, 362), False, 'import json\n')] |
import json
from typing import List, Dict
from dataclasses import dataclass
@dataclass
class ProgramGraph:
"""Program Graph"""
locations: List[str]
actions: List[str]
effects: Dict[str, List[str]]
hooks: List[List[str]]
initial_locations: List[str]
initial_guard: Dict[str, int]
def read_pg_from_json(file_path: str) -> ProgramGraph:
"""读入Program Graph
:param file_path: 文件路径
"""
with open(file_path, encoding='utf-8') as f:
ok = json.load(f)
return ProgramGraph(ok['Loc'], ok['Act'], ok['Effect'], ok['Hooks'], ok['Loc_0'], ok['g_0'])
| [
"json.load"
] | [((486, 498), 'json.load', 'json.load', (['f'], {}), '(f)\n', (495, 498), False, 'import json\n')] |
from binance import Client
from binance.helpers import round_step_size
from binance.exceptions import BinanceAPIException
import pandas as pd
from numpy import average
import json
SYMBOLS_FILE = 'data/symbols.json'
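# data/api.json is expected to be a flat object: {"api_key": "...", "api_secret": "..."}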
api_json = pd.read_json("data/api.json", orient="index")
API_KEY = api_json[0]["api_key"]
API_SECRET = api_json[0]["api_secret"]
client = Client(API_KEY, API_SECRET)
class Symbol:
def __init__(self, symbol = ''):
self.symbol = {
"symbol": symbol,
"currency_pair": '',
"average_buy": 0,
"average_sell": 0,
"executed_buy": 0,
"executed_sell": 0,
"net_executed": 0,
"profit": 0,
"global_average": 0,
"commission": 0,
"tick_size": 0,
"step_size": 0
}
def get_symbols(self):
data = []
try:
with open(SYMBOLS_FILE) as json_file:
data = json.load(json_file)
        except Exception:  # missing or invalid symbols file; (re)create it below
with open(SYMBOLS_FILE, 'w') as outfile:
json.dump([], outfile, indent=2)
return data
def get_symbol(self, symbol_id = None):
symbols = self.get_symbols()
for symbol in symbols:
if(symbol['symbol'] == symbol_id):
return symbol
return None
def remove(self, symbol_id):
data = self.get_symbols()
for symbol in data:
if symbol['symbol'] == symbol_id:
data.remove(symbol)
with open(SYMBOLS_FILE, 'w') as outfile:
json.dump(data, outfile, indent=2)
return True
return False
def save(self, symbol):
try:
self.symbol["symbol"] = symbol
self.Calculate()
data = self.get_symbols()
data.append(self.symbol)
with open(SYMBOLS_FILE, 'w') as outfile:
json.dump(data, outfile, indent=2)
return {
'status': True
}
except BinanceAPIException as e:
return {
'status': False,
'message': e.message
}
def all_update(self):
data = self.get_symbols()
for symbol in data:
self.symbol["symbol"] = symbol["symbol"]
data.remove(symbol)
self.Calculate()
data.append(self.symbol)
with open(SYMBOLS_FILE, 'w') as outfile:
json.dump(data, outfile, indent=2)
def Calculate(self):
self.symbol["currency_pair"] = self.symbol["symbol"] + 'USDT'
trades_df = pd.DataFrame(client.get_my_trades(symbol=self.symbol["currency_pair"]))
symbol_info = client.get_symbol_info(symbol=self.symbol["currency_pair"])
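        # Assumes Binance's default filter ordering: filters[0] is PRICE_FILTER
        # (tickSize) and filters[2] is LOT_SIZE (stepSize).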
self.symbol["tick_size"] = float(symbol_info['filters'][0]['tickSize'])
self.symbol["step_size"] = float(symbol_info['filters'][2]['stepSize'])
if(trades_df.size != 0):
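            # Only count trades after this fixed cutoff (milliseconds since epoch, ~2021-10-11 UTC).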
trades_df = trades_df[trades_df['time'] >= 1633972308381]
trades_df['price'] = trades_df['price'].astype(float)
trades_df['qty'] = trades_df['qty'].astype(float)
trades_df['quoteQty'] = trades_df['quoteQty'].astype(float)
trades_df['commission'] = trades_df['commission'].astype(float)
try:
self.symbol["average_buy"] = round_step_size(average(trades_df[trades_df['isBuyer'] == True]['price'], weights=trades_df[trades_df['isBuyer'] == True]['qty']), self.symbol["tick_size"])
            except Exception:  # e.g. np.average raises ZeroDivisionError when there are no buy trades
self.symbol["average_buy"] = 0.0
self.symbol["executed_buy"] = round_step_size(trades_df[trades_df['isBuyer'] == True]['qty'].sum(), self.symbol["step_size"])
try:
self.symbol["average_sell"] = round_step_size(average(trades_df[trades_df['isBuyer'] == False]['price'], weights=trades_df[trades_df['isBuyer'] == False]['qty']), self.symbol["tick_size"])
            except Exception:  # e.g. np.average raises ZeroDivisionError when there are no sell trades
self.symbol["average_sell"] = 0.0
self.symbol["executed_sell"] = round_step_size(trades_df[trades_df['isBuyer'] == False]['qty'].sum(), self.symbol["step_size"])
self.symbol["profit"] = round_step_size(self.symbol["average_sell"]*self.symbol["executed_sell"] - self.symbol["average_buy"]*self.symbol["executed_buy"], self.symbol["tick_size"])
self.symbol["net_executed"] = round_step_size(self.symbol["executed_buy"] - self.symbol["executed_sell"], self.symbol["step_size"])
if(self.symbol["profit"] < 0 and self.symbol["net_executed"] > 0):
self.symbol["global_average"] = round_step_size(abs(self.symbol["profit"]) / self.symbol["net_executed"], self.symbol["tick_size"])
else:
self.symbol["global_average"] = 0
self.symbol["commission"] = round(trades_df['commission'].sum(), 8)
| [
"pandas.read_json",
"binance.Client",
"numpy.average",
"json.load",
"binance.helpers.round_step_size",
"json.dump"
] | [((230, 275), 'pandas.read_json', 'pd.read_json', (['"""data/api.json"""'], {'orient': '"""index"""'}), "('data/api.json', orient='index')\n", (242, 275), True, 'import pandas as pd\n'), ((359, 386), 'binance.Client', 'Client', (['API_KEY', 'API_SECRET'], {}), '(API_KEY, API_SECRET)\n', (365, 386), False, 'from binance import Client\n'), ((2135, 2169), 'json.dump', 'json.dump', (['data', 'outfile'], {'indent': '(2)'}), '(data, outfile, indent=2)\n', (2144, 2169), False, 'import json\n'), ((3753, 3922), 'binance.helpers.round_step_size', 'round_step_size', (["(self.symbol['average_sell'] * self.symbol['executed_sell'] - self.symbol[\n 'average_buy'] * self.symbol['executed_buy'])", "self.symbol['tick_size']"], {}), "(self.symbol['average_sell'] * self.symbol['executed_sell'] -\n self.symbol['average_buy'] * self.symbol['executed_buy'], self.symbol[\n 'tick_size'])\n", (3768, 3922), False, 'from binance.helpers import round_step_size\n'), ((3947, 4052), 'binance.helpers.round_step_size', 'round_step_size', (["(self.symbol['executed_buy'] - self.symbol['executed_sell'])", "self.symbol['step_size']"], {}), "(self.symbol['executed_buy'] - self.symbol['executed_sell'],\n self.symbol['step_size'])\n", (3962, 4052), False, 'from binance.helpers import round_step_size\n'), ((859, 879), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (868, 879), False, 'import json\n'), ((1678, 1712), 'json.dump', 'json.dump', (['data', 'outfile'], {'indent': '(2)'}), '(data, outfile, indent=2)\n', (1687, 1712), False, 'import json\n'), ((947, 979), 'json.dump', 'json.dump', (['[]', 'outfile'], {'indent': '(2)'}), '([], outfile, indent=2)\n', (956, 979), False, 'import json\n'), ((1392, 1426), 'json.dump', 'json.dump', (['data', 'outfile'], {'indent': '(2)'}), '(data, outfile, indent=2)\n', (1401, 1426), False, 'import json\n'), ((2993, 3111), 'numpy.average', 'average', (["trades_df[trades_df['isBuyer'] == True]['price']"], {'weights': "trades_df[trades_df['isBuyer'] == True]['qty']"}), "(trades_df[trades_df['isBuyer'] == True]['price'], weights=trades_df\n [trades_df['isBuyer'] == True]['qty'])\n", (3000, 3111), False, 'from numpy import average\n'), ((3388, 3508), 'numpy.average', 'average', (["trades_df[trades_df['isBuyer'] == False]['price']"], {'weights': "trades_df[trades_df['isBuyer'] == False]['qty']"}), "(trades_df[trades_df['isBuyer'] == False]['price'], weights=\n trades_df[trades_df['isBuyer'] == False]['qty'])\n", (3395, 3508), False, 'from numpy import average\n')] |
#!/usr/bin/env python3
from pathlib import Path
from typing import NoReturn, List, AnyStr
from utils.command import Command
from utils.analyze import DatasetAnalyzer
from utils.plots import Plotter
class Stats(Command):
def __init__(self, src_path: str = None, save: str = None, plots: List[AnyStr] = None, **kwargs):
super().__init__(**kwargs)
self.src_path = src_path if src_path else self.configs.data_paths.processed
self.analyzer = DatasetAnalyzer(src=self.src_path / Path('src-dataset.txt'),
tgt=self.src_path / Path('tgt-dataset.txt'), verbose=self.verbose)
self.plotter = Plotter(save)
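        # Map plot names to bound methods; when --plots is given, keep only the requested subset.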
mapping = {'zipf': self.zipf, 'hist': self.histogram, 'bars': self.bars}
self.plots = {name: func for name, func in mapping.items() if name in plots} if plots else mapping
def __call__(self, **kwargs):
for name, plot in self.plots.items():
plot()
def zipf(self):
tokens, counts = self.analyzer.token_counts()
self.plotter.zipf_log(tokens, counts)
def histogram(self):
histogram_data = [self.analyzer.tokens_per_line(), self.analyzer.tokens_per_line(tgt=True)]
self.plotter.multi_histogram(data=histogram_data, labels=['source', 'target'], x_label="Number of tokens",
interval=(0, 400), y_label="Frequency", bins_size=100, pdf=True)
def bars(self):
# Number of statements
labels, values = zip(*self.analyzer.hunk_size().items())
self.plotter.bars(values, index=labels, bar_label="statements", y_label="number of samples")
@staticmethod
def definition() -> dict:
return {'name': 'stats',
'command': Stats,
'description': "Tests the model on the test dataset."}
@staticmethod
def add_arguments(cmd_parser) -> NoReturn:
        cmd_parser.add_argument('-sp', '--src_path', help='Source dataset path.', type=str, required=False)
cmd_parser.add_argument('--save', help='Saves the plots to specified path.', type=str, required=False)
cmd_parser.add_argument('--plots', help='Flag for specifying the available plots to be shown.',
choices=['zipf', 'hist', 'bars'],
default=None)
| [
"utils.plots.Plotter",
"pathlib.Path"
] | [((660, 673), 'utils.plots.Plotter', 'Plotter', (['save'], {}), '(save)\n', (667, 673), False, 'from utils.plots import Plotter\n'), ((505, 528), 'pathlib.Path', 'Path', (['"""src-dataset.txt"""'], {}), "('src-dataset.txt')\n", (509, 528), False, 'from pathlib import Path\n'), ((590, 613), 'pathlib.Path', 'Path', (['"""tgt-dataset.txt"""'], {}), "('tgt-dataset.txt')\n", (594, 613), False, 'from pathlib import Path\n')] |
"""Test file for numpy tracing"""
from copy import deepcopy
import numpy
import pytest
from concrete.common.data_types.floats import Float
from concrete.common.data_types.integers import Integer
from concrete.common.representation import intermediate as ir
from concrete.common.values import EncryptedScalar, EncryptedTensor
from concrete.numpy import tracing
OPERATIONS_TO_TEST = [ir.Add, ir.Sub, ir.Mul]
# Functions from tracing.NPTracer.LIST_OF_SUPPORTED_UFUNC, whose output
# is a float64, whatever the input type
LIST_OF_UFUNC_WHOSE_OUTPUT_IS_FLOAT64 = set(
[
numpy.arccos,
numpy.arccosh,
numpy.arcsin,
numpy.arcsinh,
numpy.arctan,
numpy.arctanh,
numpy.cbrt,
numpy.ceil,
numpy.cos,
numpy.cosh,
numpy.deg2rad,
numpy.degrees,
numpy.exp,
numpy.exp2,
numpy.expm1,
numpy.fabs,
numpy.floor,
numpy.log,
numpy.log10,
numpy.log1p,
numpy.log2,
numpy.rad2deg,
numpy.radians,
numpy.rint,
numpy.sin,
numpy.sinh,
numpy.spacing,
numpy.sqrt,
numpy.tan,
numpy.tanh,
numpy.trunc,
]
)
# Functions from tracing.NPTracer.LIST_OF_SUPPORTED_UFUNC, whose output
# is a boolean, whatever the input type
LIST_OF_UFUNC_WHOSE_OUTPUT_IS_BOOL = set(
[
numpy.isfinite,
numpy.isinf,
numpy.isnan,
numpy.signbit,
numpy.logical_not,
]
)
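# Any other supported ufunc is expected to roughly preserve its input value type,
# widened to at least 32 bits (see the final branch of the unary-ufunc test below).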
@pytest.mark.parametrize(
"inputs,expected_output_node",
[
pytest.param(
{"x": EncryptedScalar(Integer(7, is_signed=False))},
ir.GenericFunction,
),
pytest.param(
{"x": EncryptedScalar(Integer(32, is_signed=True))},
ir.GenericFunction,
),
pytest.param(
{"x": EncryptedScalar(Integer(64, is_signed=True))},
ir.GenericFunction,
),
pytest.param(
{"x": EncryptedScalar(Integer(128, is_signed=True))},
ir.GenericFunction,
marks=pytest.mark.xfail(strict=True, raises=NotImplementedError),
),
pytest.param(
{"x": EncryptedScalar(Float(64))},
ir.GenericFunction,
),
],
)
@pytest.mark.parametrize(
"function_to_trace_def",
[f for f in tracing.NPTracer.LIST_OF_SUPPORTED_UFUNC if f.nin == 1],
)
def test_trace_numpy_supported_unary_ufuncs(inputs, expected_output_node, function_to_trace_def):
"""Function to trace supported numpy ufuncs"""
# We really need a lambda (because numpy functions are not playing
# nice with inspect.signature), but pylint and flake8 are not happy
# with it
# pylint: disable=cell-var-from-loop
function_to_trace = lambda x: function_to_trace_def(x) # noqa: E731
# pylint: enable=cell-var-from-loop
op_graph = tracing.trace_numpy_function(function_to_trace, inputs)
assert len(op_graph.output_nodes) == 1
assert isinstance(op_graph.output_nodes[0], expected_output_node)
assert len(op_graph.output_nodes[0].outputs) == 1
if function_to_trace_def in LIST_OF_UFUNC_WHOSE_OUTPUT_IS_FLOAT64:
assert op_graph.output_nodes[0].outputs[0] == EncryptedScalar(Float(64))
elif function_to_trace_def in LIST_OF_UFUNC_WHOSE_OUTPUT_IS_BOOL:
# Boolean function
assert op_graph.output_nodes[0].outputs[0] == EncryptedScalar(Integer(8, is_signed=False))
else:
# Function keeping more or less input type
input_node_type = inputs["x"]
expected_output_node_type = deepcopy(input_node_type)
expected_output_node_type.dtype.bit_width = max(
expected_output_node_type.dtype.bit_width, 32
)
assert op_graph.output_nodes[0].outputs[0] == expected_output_node_type
@pytest.mark.parametrize("np_function", tracing.NPTracer.LIST_OF_SUPPORTED_UFUNC)
def test_nptracer_get_tracing_func_for_np_functions(np_function):
"""Test NPTracer get_tracing_func_for_np_function"""
expected_tracing_func = tracing.NPTracer.UFUNC_ROUTING[np_function]
assert tracing.NPTracer.get_tracing_func_for_np_function(np_function) == expected_tracing_func
def subtest_tracing_calls(
function_to_trace,
input_value_input_and_expected_output_tuples,
check_array_equality,
):
"""Test memory function managed by GenericFunction node of the form numpy.something"""
for input_value, input_, expected_output in input_value_input_and_expected_output_tuples:
op_graph = tracing.trace_numpy_function(function_to_trace, {"x": input_value})
output_node = op_graph.output_nodes[0]
node_results = op_graph.evaluate({0: input_})
evaluated_output = node_results[output_node]
assert isinstance(evaluated_output, type(expected_output)), type(evaluated_output)
check_array_equality(evaluated_output, expected_output)
@pytest.mark.parametrize(
"function_to_trace,input_value_input_and_expected_output_tuples",
[
(
lambda x: numpy.transpose(x),
[
(
EncryptedTensor(Integer(4, is_signed=False), shape=(2, 2)),
numpy.arange(4).reshape(2, 2),
numpy.array([[0, 2], [1, 3]]),
),
(
EncryptedTensor(Integer(4, is_signed=False), shape=(2, 2)),
numpy.arange(4, 8).reshape(2, 2),
numpy.array([[4, 6], [5, 7]]),
),
(
EncryptedTensor(Integer(6, is_signed=False), shape=()),
numpy.int64(42),
numpy.int64(42),
),
],
),
(
lambda x: numpy.transpose(x) + 42,
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(42, 57).reshape(3, 5).transpose(),
),
(
EncryptedTensor(Integer(6, is_signed=False), shape=()),
numpy.int64(42),
numpy.int64(84),
),
],
),
(
lambda x: numpy.ravel(x),
[
(
EncryptedTensor(Integer(4, is_signed=False), shape=(2, 2)),
numpy.arange(4),
numpy.array([0, 1, 2, 3]),
),
(
EncryptedTensor(Integer(4, is_signed=False), shape=(2, 2)),
numpy.arange(4).reshape(2, 2),
numpy.array([0, 1, 2, 3]),
),
(
EncryptedTensor(Integer(6, is_signed=False), shape=()),
numpy.int64(42),
numpy.array([42], dtype=numpy.int64),
),
],
),
(
lambda x: numpy.reshape(x, (5, 3)) + 42,
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(42, 57).reshape(5, 3),
),
],
),
],
)
def test_tracing_numpy_calls(
function_to_trace,
input_value_input_and_expected_output_tuples,
check_array_equality,
):
"""Test memory function managed by GenericFunction node of the form numpy.something"""
subtest_tracing_calls(
function_to_trace, input_value_input_and_expected_output_tuples, check_array_equality
)
@pytest.mark.parametrize(
"function_to_trace,input_value_input_and_expected_output_tuples",
[
(
lambda x: x.transpose() + 42,
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(42, 57).reshape(3, 5).transpose(),
),
(
EncryptedTensor(Integer(6, is_signed=False), shape=()),
numpy.int64(42),
numpy.int64(84),
),
],
),
(
lambda x: x.ravel(),
[
(
EncryptedTensor(Integer(4, is_signed=False), shape=(2, 2)),
numpy.arange(4),
numpy.array([0, 1, 2, 3]),
),
(
EncryptedTensor(Integer(4, is_signed=False), shape=(2, 2)),
numpy.arange(4).reshape(2, 2),
numpy.array([0, 1, 2, 3]),
),
(
EncryptedTensor(Integer(6, is_signed=False), shape=()),
numpy.int64(42),
numpy.array([42], dtype=numpy.int64),
),
],
),
(
lambda x: x.reshape((5, 3)) + 42,
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(42, 57).reshape(5, 3),
),
],
),
pytest.param(
lambda x: x.reshape((5, 3)),
[
(
EncryptedTensor(Integer(6, is_signed=False), shape=()),
numpy.int64(42),
None,
)
],
marks=pytest.mark.xfail(strict=True, raises=ValueError),
),
],
)
def test_tracing_ndarray_calls(
function_to_trace,
input_value_input_and_expected_output_tuples,
check_array_equality,
):
"""Test memory function managed by GenericFunction node of the form ndarray.something"""
subtest_tracing_calls(
function_to_trace, input_value_input_and_expected_output_tuples, check_array_equality
)
| [
"concrete.common.data_types.floats.Float",
"numpy.int64",
"numpy.reshape",
"pytest.mark.xfail",
"concrete.common.data_types.integers.Integer",
"pytest.mark.parametrize",
"numpy.array",
"concrete.numpy.tracing.trace_numpy_function",
"copy.deepcopy",
"concrete.numpy.tracing.NPTracer.get_tracing_func_for_np_function",
"numpy.transpose",
"numpy.ravel",
"numpy.arange"
] | [((2307, 2429), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""function_to_trace_def"""', '[f for f in tracing.NPTracer.LIST_OF_SUPPORTED_UFUNC if f.nin == 1]'], {}), "('function_to_trace_def', [f for f in tracing.\n    NPTracer.LIST_OF_SUPPORTED_UFUNC if f.nin == 1])\n", (2330, 2429), False, 'import pytest\n'), ((3860, 3945), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""np_function"""', 'tracing.NPTracer.LIST_OF_SUPPORTED_UFUNC'], {}), "('np_function', tracing.NPTracer.LIST_OF_SUPPORTED_UFUNC\n    )\n", (3883, 3945), False, 'import pytest\n'), ((2913, 2968), 'concrete.numpy.tracing.trace_numpy_function', 'tracing.trace_numpy_function', (['function_to_trace', 'inputs'], {}), '(function_to_trace, inputs)\n', (2941, 2968), False, 'from concrete.numpy import tracing\n'), ((4149, 4211), 'concrete.numpy.tracing.NPTracer.get_tracing_func_for_np_function', 'tracing.NPTracer.get_tracing_func_for_np_function', (['np_function'], {}), '(np_function)\n', (4198, 4211), False, 'from concrete.numpy import tracing\n'), ((4573, 4640), 'concrete.numpy.tracing.trace_numpy_function', 'tracing.trace_numpy_function', (['function_to_trace', "{'x': input_value}"], {}), "(function_to_trace, {'x': input_value})\n", (4601, 4640), False, 'from concrete.numpy import tracing\n'), ((3624, 3649), 'copy.deepcopy', 'deepcopy', (['input_node_type'], {}), '(input_node_type)\n', (3632, 3649), False, 'from copy import deepcopy\n'), ((3279, 3288), 'concrete.common.data_types.floats.Float', 'Float', (['(64)'], {}), '(64)\n', (3284, 3288), False, 'from concrete.common.data_types.floats import Float\n'), ((2114, 2172), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'strict': '(True)', 'raises': 'NotImplementedError'}), '(strict=True, raises=NotImplementedError)\n', (2131, 2172), False, 'import pytest\n'), ((5087, 5105), 'numpy.transpose', 'numpy.transpose', (['x'], {}), '(x)\n', (5102, 5105), False, 'import numpy\n'), ((6329, 6343), 'numpy.ravel', 'numpy.ravel', (['x'], {}), '(x)\n', (6340, 6343), False, 'import numpy\n'), ((9632, 9681), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'strict': '(True)', 'raises': 'ValueError'}), '(strict=True, raises=ValueError)\n', (9649, 9681), False, 'import pytest\n'), ((3458, 3485), 'concrete.common.data_types.integers.Integer', 'Integer', (['(8)'], {'is_signed': '(False)'}), '(8, is_signed=False)\n', (3465, 3485), False, 'from concrete.common.data_types.integers import Integer\n'), ((1642, 1669), 'concrete.common.data_types.integers.Integer', 'Integer', (['(7)'], {'is_signed': '(False)'}), '(7, is_signed=False)\n', (1649, 1669), False, 'from concrete.common.data_types.integers import Integer\n'), ((1772, 1799), 'concrete.common.data_types.integers.Integer', 'Integer', (['(32)'], {'is_signed': '(True)'}), '(32, is_signed=True)\n', (1779, 1799), False, 'from concrete.common.data_types.integers import Integer\n'), ((1902, 1929), 'concrete.common.data_types.integers.Integer', 'Integer', (['(64)'], {'is_signed': '(True)'}), '(64, is_signed=True)\n', (1909, 1929), False, 'from concrete.common.data_types.integers import Integer\n'), ((2032, 2060), 'concrete.common.data_types.integers.Integer', 'Integer', (['(128)'], {'is_signed': '(True)'}), '(128, is_signed=True)\n', (2039, 2060), False, 'from concrete.common.data_types.integers import Integer\n'), ((2241, 2250), 'concrete.common.data_types.floats.Float', 'Float', (['(64)'], {}), '(64)\n', (2246, 2250), False, 'from concrete.common.data_types.floats import Float\n'), ((5290, 5319), 'numpy.array', 'numpy.array', (['[[0, 2], [1, 3]]'], {}), '([[0, 2], [1, 3]])\n', (5301, 5319), False, 'import numpy\n'), ((5512, 5541), 'numpy.array', 'numpy.array', (['[[4, 6], [5, 7]]'], {}), '([[4, 6], [5, 7]])\n', (5523, 5541), False, 'import numpy\n'), ((5676, 5691), 'numpy.int64', 'numpy.int64', (['(42)'], {}), '(42)\n', (5687, 5691), False, 'import numpy\n'), ((5713, 5728), 'numpy.int64', 'numpy.int64', (['(42)'], {}), '(42)\n', (5724, 5728), False, 'import numpy\n'), ((5807, 5825), 'numpy.transpose', 'numpy.transpose', (['x'], {}), '(x)\n', (5822, 5825), False, 'import numpy\n'), ((6198, 6213), 'numpy.int64', 'numpy.int64', (['(42)'], {}), '(42)\n', (6209, 6213), False, 'import numpy\n'), ((6235, 6250), 'numpy.int64', 'numpy.int64', (['(84)'], {}), '(84)\n', (6246, 6250), False, 'import numpy\n'), ((6477, 6492), 'numpy.arange', 'numpy.arange', (['(4)'], {}), '(4)\n', (6489, 6492), False, 'import numpy\n'), ((6514, 6539), 'numpy.array', 'numpy.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (6525, 6539), False, 'import numpy\n'), ((6729, 6754), 'numpy.array', 'numpy.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (6740, 6754), False, 'import numpy\n'), ((6889, 6904), 'numpy.int64', 'numpy.int64', (['(42)'], {}), '(42)\n', (6900, 6904), False, 'import numpy\n'), ((6926, 6962), 'numpy.array', 'numpy.array', (['[42]'], {'dtype': 'numpy.int64'}), '([42], dtype=numpy.int64)\n', (6937, 6962), False, 'import numpy\n'), ((7041, 7065), 'numpy.reshape', 'numpy.reshape', (['x', '(5, 3)'], {}), '(x, (5, 3))\n', (7054, 7065), False, 'import numpy\n'), ((8219, 8234), 'numpy.int64', 'numpy.int64', (['(42)'], {}), '(42)\n', (8230, 8234), False, 'import numpy\n'), ((8256, 8271), 'numpy.int64', 'numpy.int64', (['(84)'], {}), '(84)\n', (8267, 8271), False, 'import numpy\n'), ((8493, 8508), 'numpy.arange', 'numpy.arange', (['(4)'], {}), '(4)\n', (8505, 8508), False, 'import numpy\n'), ((8530, 8555), 'numpy.array', 'numpy.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (8541, 8555), False, 'import numpy\n'), ((8745, 8770), 'numpy.array', 'numpy.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (8756, 8770), False, 'import numpy\n'), ((8905, 8920), 'numpy.int64', 'numpy.int64', (['(42)'], {}), '(42)\n', (8916, 8920), False, 'import numpy\n'), ((8942, 8978), 'numpy.array', 'numpy.array', (['[42]'], {'dtype': 'numpy.int64'}), '([42], dtype=numpy.int64)\n', (8953, 8978), False, 'import numpy\n'), ((9538, 9553), 'numpy.int64', 'numpy.int64', (['(42)'], {}), '(42)\n', (9549, 9553), False, 'import numpy\n'), ((5175, 5202), 'concrete.common.data_types.integers.Integer', 'Integer', (['(4)'], {'is_signed': '(False)'}), '(4, is_signed=False)\n', (5182, 5202), False, 'from concrete.common.data_types.integers import Integer\n'), ((5394, 5421), 'concrete.common.data_types.integers.Integer', 'Integer', (['(4)'], {'is_signed': '(False)'}), '(4, is_signed=False)\n', (5401, 5421), False, 'from concrete.common.data_types.integers import Integer\n'), ((5616, 5643), 'concrete.common.data_types.integers.Integer', 'Integer', (['(6)'], {'is_signed': '(False)'}), '(6, is_signed=False)\n', (5623, 5643), False, 'from concrete.common.data_types.integers import Integer\n'), ((5900, 5928), 'concrete.common.data_types.integers.Integer', 'Integer', (['(32)'], {'is_signed': '(False)'}), '(32, is_signed=False)\n', (5907, 5928), False, 'from concrete.common.data_types.integers import Integer\n'), ((6138, 6165), 'concrete.common.data_types.integers.Integer', 'Integer', (['(6)'], {'is_signed': '(False)'}), '(6, is_signed=False)\n', (6145, 6165), False, 'from concrete.common.data_types.integers import Integer\n'), ((6413, 6440), 'concrete.common.data_types.integers.Integer', 'Integer', (['(4)'], {'is_signed': '(False)'}), '(4, is_signed=False)\n', (6420, 6440), False, 'from concrete.common.data_types.integers import Integer\n'), ((6614, 6641), 'concrete.common.data_types.integers.Integer', 'Integer', (['(4)'], {'is_signed': '(False)'}), '(4, is_signed=False)\n', (6621, 6641), False, 'from concrete.common.data_types.integers import Integer\n'), ((6829, 6856), 'concrete.common.data_types.integers.Integer', 'Integer', (['(6)'], {'is_signed': '(False)'}), '(6, is_signed=False)\n', (6836, 6856), False, 'from concrete.common.data_types.integers import Integer\n'), ((7140, 7168), 'concrete.common.data_types.integers.Integer', 'Integer', (['(32)'], {'is_signed': '(False)'}), '(32, is_signed=False)\n', (7147, 7168), False, 'from concrete.common.data_types.integers import Integer\n'), ((7921, 7949), 'concrete.common.data_types.integers.Integer', 'Integer', (['(32)'], {'is_signed': '(False)'}), '(32, is_signed=False)\n', (7928, 7949), False, 'from concrete.common.data_types.integers import Integer\n'), ((8159, 8186), 'concrete.common.data_types.integers.Integer', 'Integer', (['(6)'], {'is_signed': '(False)'}), '(6, is_signed=False)\n', (8166, 8186), False, 'from concrete.common.data_types.integers import Integer\n'), ((8429, 8456), 'concrete.common.data_types.integers.Integer', 'Integer', (['(4)'], {'is_signed': '(False)'}), '(4, is_signed=False)\n', (8436, 8456), False, 'from concrete.common.data_types.integers import Integer\n'), ((8630, 8657), 'concrete.common.data_types.integers.Integer', 'Integer', (['(4)'], {'is_signed': '(False)'}), '(4, is_signed=False)\n', (8637, 8657), False, 'from concrete.common.data_types.integers import Integer\n'), ((8845, 8872), 'concrete.common.data_types.integers.Integer', 'Integer', (['(6)'], {'is_signed': '(False)'}), '(6, is_signed=False)\n', (8852, 8872), False, 'from concrete.common.data_types.integers import Integer\n'), ((9149, 9177), 'concrete.common.data_types.integers.Integer', 'Integer', (['(32)'], {'is_signed': '(False)'}), '(32, is_signed=False)\n', (9156, 9177), False, 'from concrete.common.data_types.integers import Integer\n'), ((9478, 9505), 'concrete.common.data_types.integers.Integer', 'Integer', (['(6)'], {'is_signed': '(False)'}), '(6, is_signed=False)\n', (9485, 9505), False, 'from concrete.common.data_types.integers import Integer\n'), ((5239, 5254), 'numpy.arange', 'numpy.arange', (['(4)'], {}), '(4)\n', (5251, 5254), False, 'import numpy\n'), ((5458, 5476), 'numpy.arange', 'numpy.arange', (['(4)', '(8)'], {}), '(4, 8)\n', (5470, 5476), False, 'import numpy\n'), ((5965, 5981), 'numpy.arange', 'numpy.arange', (['(15)'], {}), '(15)\n', (5977, 5981), False, 'import numpy\n'), ((6678, 6693), 'numpy.arange', 'numpy.arange', (['(4)'], {}), '(4)\n', (6690, 6693), False, 'import numpy\n'), ((7205, 7221), 'numpy.arange', 'numpy.arange', (['(15)'], {}), '(15)\n', (7217, 7221), False, 'import numpy\n'), ((7257, 7277), 'numpy.arange', 'numpy.arange', (['(42)', '(57)'], {}), '(42, 57)\n', (7269, 7277), False, 'import numpy\n'), ((7986, 8002), 'numpy.arange', 'numpy.arange', (['(15)'], {}), '(15)\n', (7998, 8002), False, 'import numpy\n'), ((8694, 8709), 'numpy.arange', 'numpy.arange', (['(4)'], {}), '(4)\n', (8706, 8709), False, 'import numpy\n'), ((9214, 9230), 'numpy.arange', 'numpy.arange', (['(15)'], {}), '(15)\n', (9226, 9230), False, 'import numpy\n'), ((9266, 9286), 'numpy.arange', 'numpy.arange', (['(42)', '(57)'], {}), '(42, 57)\n', (9278, 9286), False, 'import numpy\n'), ((6017, 6037), 'numpy.arange', 'numpy.arange', (['(42)', '(57)'], {}), '(42, 57)\n', (6029, 6037), False, 'import numpy\n'), ((8038, 8058), 'numpy.arange', 'numpy.arange', (['(42)', '(57)'], {}), '(42, 57)\n', (8050, 8058), False, 'import numpy\n')]
#!/usr/bin/env python
import os
import sys
from datetime import datetime
import time
import tensorflow as tf
import numpy as np
import cPickle as pickle
import cifar100
import resnet_split as resnet
# Dataset Configuration
tf.app.flags.DEFINE_string('data_dir', './cifar100/train_val_split', """Path to the CIFAR-100 data.""")
tf.app.flags.DEFINE_integer('num_classes', 100, """Number of classes in the dataset.""")
tf.app.flags.DEFINE_integer('num_test_instance', 10000, """Number of test images.""")
# Network Configuration
tf.app.flags.DEFINE_integer('batch_size', 100, """Number of images to process in a batch.""")
tf.app.flags.DEFINE_integer('num_residual_units', 2, """Number of residual block per group.
Total number of conv layers will be 6n+4""")
tf.app.flags.DEFINE_integer('k', 8, """Network width multiplier""")
tf.app.flags.DEFINE_integer('ngroups1', 1, """Grouping number on logits""")
tf.app.flags.DEFINE_integer('ngroups2', 1, """Grouping number on unit_3_x""")
tf.app.flags.DEFINE_integer('ngroups3', 1, """Grouping number on unit_2_x""")
# Optimization Configuration
tf.app.flags.DEFINE_float('l2_weight', 0.0001, """L2 loss weight applied all the weights""")
tf.app.flags.DEFINE_float('momentum', 0.9, """The momentum of MomentumOptimizer""")
tf.app.flags.DEFINE_float('initial_lr', 0.1, """Initial learning rate""")
tf.app.flags.DEFINE_string('lr_step_epoch', "80.0,120.0,160.0", """Epochs after which learning rate decays""")
tf.app.flags.DEFINE_float('lr_decay', 0.1, """Learning rate decay factor""")
tf.app.flags.DEFINE_boolean('finetune', False, """Whether to finetune.""")
# Evaluation Configuration
tf.app.flags.DEFINE_string('basemodel', './group/model.ckpt-199999', """Base model to load parameters""")
tf.app.flags.DEFINE_string('checkpoint', './split/model.ckpt-149999', """Path to the model checkpoint file""")
tf.app.flags.DEFINE_string('output_file', './split/eval.pkl', """Path to the result pkl file""")
tf.app.flags.DEFINE_integer('test_iter', 100, """Number of test batches during the evaluation""")
tf.app.flags.DEFINE_integer('display', 10, """Number of iterations to display training info.""")
tf.app.flags.DEFINE_float('gpu_fraction', 0.95, """The fraction of GPU memory to be allocated""")
tf.app.flags.DEFINE_boolean('log_device_placement', False, """Whether to log device placement.""")
FLAGS = tf.app.flags.FLAGS
def get_lr(initial_lr, lr_decay, lr_decay_steps, global_step):
lr = initial_lr
for s in lr_decay_steps:
if global_step >= s:
lr *= lr_decay
return lr
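# e.g. get_lr(0.1, 0.1, [100, 200], 150) == 0.01 (decayed once, having passed step 100)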
def train():
print('[Dataset Configuration]')
print('\tCIFAR-100 dir: %s' % FLAGS.data_dir)
print('\tNumber of classes: %d' % FLAGS.num_classes)
print('\tNumber of test images: %d' % FLAGS.num_test_instance)
print('[Network Configuration]')
print('\tBatch size: %d' % FLAGS.batch_size)
print('\tResidual blocks per group: %d' % FLAGS.num_residual_units)
print('\tNetwork width multiplier: %d' % FLAGS.k)
print('\tNumber of Groups: %d-%d-%d' % (FLAGS.ngroups3, FLAGS.ngroups2, FLAGS.ngroups1))
print('\tBasemodel file: %s' % FLAGS.basemodel)
print('[Evaluation Configuration]')
print('\tCheckpoint file: %s' % FLAGS.checkpoint)
print('\tOutput file path: %s' % FLAGS.output_file)
print('\tTest iterations: %d' % FLAGS.test_iter)
print('\tSteps per displaying info: %d' % FLAGS.display)
print('\tGPU memory fraction: %f' % FLAGS.gpu_fraction)
print('\tLog device placement: %d' % FLAGS.log_device_placement)
with tf.Graph().as_default():
global_step = tf.Variable(0, trainable=False, name='global_step')
# Get images and labels of CIFAR-100
print('Load CIFAR-100 dataset')
test_dataset_path = os.path.join(FLAGS.data_dir, 'test')
with tf.variable_scope('test_image'):
cifar100_test = cifar100.CIFAR100Runner(test_dataset_path, image_per_thread=1,
shuffle=False, distort=False, capacity=5000)
test_images, test_labels = cifar100_test.get_inputs(FLAGS.batch_size)
# Build a Graph that computes the predictions from the inference model.
images = tf.placeholder(tf.float32, [FLAGS.batch_size, cifar100.IMAGE_SIZE, cifar100.IMAGE_SIZE, 3])
labels = tf.placeholder(tf.int32, [FLAGS.batch_size])
# Get splitted params
if not FLAGS.basemodel:
print('No basemodel found to load split params')
sys.exit(-1)
else:
print('Load split params from %s' % FLAGS.basemodel)
def get_perms(q_name, ngroups):
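                # Recover the group assignment learned by the base model: each unit is
                # placed in the group whose alpha weight is largest.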
split_alpha = reader.get_tensor(q_name+'/alpha')
q_amax = np.argmax(split_alpha, axis=0)
return [np.where(q_amax == i)[0] for i in range(ngroups)]
reader = tf.train.NewCheckpointReader(FLAGS.basemodel)
split_params = {}
print('\tlogits...')
base_logits_w = reader.get_tensor('logits/fc/weights')
base_logits_b = reader.get_tensor('logits/fc/biases')
split_p1_idxs = get_perms('group/split_p1', FLAGS.ngroups1)
split_q1_idxs = get_perms('group/split_q1', FLAGS.ngroups1)
logits_params = {'weights':[], 'biases':[], 'input_perms':[], 'output_perms':[]}
for i in range(FLAGS.ngroups1):
logits_params['weights'].append(base_logits_w[split_p1_idxs[i], :][:, split_q1_idxs[i]])
logits_params['biases'].append(base_logits_b[split_q1_idxs[i]])
logits_params['input_perms'] = split_p1_idxs
logits_params['output_perms'] = split_q1_idxs
split_params['logits'] = logits_params
if FLAGS.ngroups2 > 1:
print('\tunit_3_x...')
base_unit_3_0_shortcut_k = reader.get_tensor('unit_3_0/shortcut/kernel')
base_unit_3_0_conv1_k = reader.get_tensor('unit_3_0/conv_1/kernel')
base_unit_3_0_conv2_k = reader.get_tensor('unit_3_0/conv_2/kernel')
base_unit_3_1_conv1_k = reader.get_tensor('unit_3_1/conv_1/kernel')
base_unit_3_1_conv2_k = reader.get_tensor('unit_3_1/conv_2/kernel')
split_p2_idxs = get_perms('group/split_p2', FLAGS.ngroups2)
split_q2_idxs = _merge_split_idxs(split_p1_idxs, _get_even_merge_idxs(FLAGS.ngroups1, FLAGS.ngroups2))
split_r21_idxs = get_perms('group/split_r21', FLAGS.ngroups2)
split_r22_idxs = get_perms('group/split_r22', FLAGS.ngroups2)
unit_3_0_params = {'shortcut':[], 'conv1':[], 'conv2':[], 'p_perms':[], 'q_perms':[], 'r_perms':[]}
for i in range(FLAGS.ngroups2):
unit_3_0_params['shortcut'].append(base_unit_3_0_shortcut_k[:,:,split_p2_idxs[i],:][:,:,:,split_q2_idxs[i]])
unit_3_0_params['conv1'].append(base_unit_3_0_conv1_k[:,:,split_p2_idxs[i],:][:,:,:,split_r21_idxs[i]])
unit_3_0_params['conv2'].append(base_unit_3_0_conv2_k[:,:,split_r21_idxs[i],:][:,:,:,split_q2_idxs[i]])
unit_3_0_params['p_perms'] = split_p2_idxs
unit_3_0_params['q_perms'] = split_q2_idxs
unit_3_0_params['r_perms'] = split_r21_idxs
split_params['unit_3_0'] = unit_3_0_params
unit_3_1_params = {'conv1':[], 'conv2':[], 'p_perms':[], 'r_perms':[]}
for i in range(FLAGS.ngroups2):
unit_3_1_params['conv1'].append(base_unit_3_1_conv1_k[:,:,split_q2_idxs[i],:][:,:,:,split_r22_idxs[i]])
unit_3_1_params['conv2'].append(base_unit_3_1_conv2_k[:,:,split_r22_idxs[i],:][:,:,:,split_q2_idxs[i]])
unit_3_1_params['p_perms'] = split_q2_idxs
unit_3_1_params['r_perms'] = split_r22_idxs
split_params['unit_3_1'] = unit_3_1_params
if FLAGS.ngroups3 > 1:
print('\tconv4_x...')
base_unit_2_0_shortcut_k = reader.get_tensor('unit_2_0/shortcut/kernel')
base_unit_2_0_conv1_k = reader.get_tensor('unit_2_0/conv_1/kernel')
base_unit_2_0_conv2_k = reader.get_tensor('unit_2_0/conv_2/kernel')
base_unit_2_1_conv1_k = reader.get_tensor('unit_2_1/conv_1/kernel')
base_unit_2_1_conv2_k = reader.get_tensor('unit_2_1/conv_2/kernel')
split_p3_idxs = get_perms('group/split_p3', FLAGS.ngroups3)
split_q3_idxs = _merge_split_idxs(split_p2_idxs, _get_even_merge_idxs(FLAGS.ngroups2, FLAGS.ngroups3))
split_r31_idxs = get_perms('group/split_r31', FLAGS.ngroups3)
split_r32_idxs = get_perms('group/split_r32', FLAGS.ngroups3)
unit_2_0_params = {'shortcut':[], 'conv1':[], 'conv2':[], 'p_perms':[], 'q_perms':[], 'r_perms':[]}
for i in range(FLAGS.ngroups3):
unit_2_0_params['shortcut'].append(base_unit_2_0_shortcut_k[:,:,split_p3_idxs[i],:][:,:,:,split_q3_idxs[i]])
unit_2_0_params['conv1'].append(base_unit_2_0_conv1_k[:,:,split_p3_idxs[i],:][:,:,:,split_r31_idxs[i]])
unit_2_0_params['conv2'].append(base_unit_2_0_conv2_k[:,:,split_r31_idxs[i],:][:,:,:,split_q3_idxs[i]])
unit_2_0_params['p_perms'] = split_p3_idxs
unit_2_0_params['q_perms'] = split_q3_idxs
unit_2_0_params['r_perms'] = split_r31_idxs
split_params['unit_2_0'] = unit_2_0_params
unit_2_1_params = {'conv1':[], 'conv2':[], 'p_perms':[], 'r_perms':[]}
for i in range(FLAGS.ngroups3):
unit_2_1_params['conv1'].append(base_unit_2_1_conv1_k[:,:,split_q3_idxs[i],:][:,:,:,split_r32_idxs[i]])
unit_2_1_params['conv2'].append(base_unit_2_1_conv2_k[:,:,split_r32_idxs[i],:][:,:,:,split_q3_idxs[i]])
unit_2_1_params['p_perms'] = split_q3_idxs
unit_2_1_params['r_perms'] = split_r32_idxs
split_params['unit_2_1'] = unit_2_1_params
# Build model
hp = resnet.HParams(batch_size=FLAGS.batch_size,
num_classes=FLAGS.num_classes,
num_residual_units=FLAGS.num_residual_units,
k=FLAGS.k,
weight_decay=FLAGS.l2_weight,
ngroups1=FLAGS.ngroups1,
ngroups2=FLAGS.ngroups2,
ngroups3=FLAGS.ngroups3,
split_params=split_params,
momentum=FLAGS.momentum,
finetune=FLAGS.finetune)
network = resnet.ResNet(hp, images, labels, global_step)
network.build_model()
print('Number of Weights: %d' % network._weights)
print('FLOPs: %d' % network._flops)
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
# Start running operations on the Graph.
sess = tf.Session(config=tf.ConfigProto(
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_fraction),
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
'''debugging attempt
from tensorflow.python import debug as tf_debug
sess = tf_debug.LocalCLIDebugWrapperSession(sess)
def _get_data(datum, tensor):
return tensor == train_images
sess.add_tensor_filter("get_data", _get_data)
'''
sess.run(init)
# Create a saver.
saver = tf.train.Saver(tf.global_variables(), max_to_keep=10000)
if FLAGS.checkpoint is not None:
saver.restore(sess, FLAGS.checkpoint)
print('Load checkpoint %s' % FLAGS.checkpoint)
else:
print('No checkpoint file found.')
sys.exit(1)
# Start queue runners & summary_writer
cifar100_test.start_threads(sess, n_threads=1)
# Test!
test_loss = 0.0
test_acc = 0.0
test_time = 0.0
confusion_matrix = np.zeros((FLAGS.num_classes, FLAGS.num_classes), dtype=np.int32)
for i in range(FLAGS.test_iter):
test_images_val, test_labels_val = sess.run([test_images, test_labels])
start_time = time.time()
loss_value, acc_value, pred_value = sess.run([network.loss, network.acc, network.preds],
feed_dict={network.is_train:False, images:test_images_val, labels:test_labels_val})
duration = time.time() - start_time
test_loss += loss_value
test_acc += acc_value
test_time += duration
for l, p in zip(test_labels_val, pred_value):
confusion_matrix[l, p] += 1
if i % FLAGS.display == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: iter %d, loss=%.4f, acc=%.4f (%.1f examples/sec; %.3f sec/batch)')
print (format_str % (datetime.now(), i, loss_value, acc_value,
examples_per_sec, sec_per_batch))
test_loss /= FLAGS.test_iter
test_acc /= FLAGS.test_iter
# Print and save results
sec_per_image = test_time/FLAGS.test_iter/FLAGS.batch_size
print ('Done! Acc: %.6f, Test time: %.3f sec, %.7f sec/example' % (test_acc, test_time, sec_per_image))
print ('Saving result... ')
result = {'accuracy': test_acc, 'confusion_matrix': confusion_matrix,
'test_time': test_time, 'sec_per_image': sec_per_image}
with open(FLAGS.output_file, 'wb') as fd:
pickle.dump(result, fd)
print ('done!')
def _merge_split_q(q, merge_idxs, name='merge'):
ngroups, dim = q.shape
max_idx = np.max(merge_idxs)
temp_list = []
for i in range(max_idx + 1):
temp = []
for j in range(ngroups):
if merge_idxs[j] == i:
temp.append(q[j,:])
temp_list.append(np.sum(temp, axis=0))
ret = np.array(temp_list)
return ret
def _merge_split_idxs(split_idxs, merge_idxs, name='merge'):
ngroups = len(split_idxs)
max_idx = np.max(merge_idxs)
ret = []
for i in range(max_idx + 1):
temp = []
for j in range(ngroups):
if merge_idxs[j] == i:
temp.append(split_idxs[j])
ret.append(np.concatenate(temp))
return ret
def _get_even_merge_idxs(N, split):
assert N >= split
num_elems = [(N + split - i - 1)/split for i in range(split)]
expand_split = [[i] * n for i, n in enumerate(num_elems)]
return [t for l in expand_split for t in l]
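# e.g. _get_even_merge_idxs(5, 2) == [0, 0, 0, 1, 1] (relies on Python 2 integer division)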
def main(argv=None): # pylint: disable=unused-argument
train()
if __name__ == '__main__':
tf.app.run()
| [
"numpy.array",
"sys.exit",
"tensorflow.GPUOptions",
"tensorflow.app.run",
"tensorflow.Graph",
"numpy.where",
"tensorflow.placeholder",
"numpy.max",
"numpy.concatenate",
"tensorflow.app.flags.DEFINE_boolean",
"cPickle.dump",
"tensorflow.variable_scope",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.Variable",
"resnet_split.HParams",
"tensorflow.train.NewCheckpointReader",
"numpy.argmax",
"tensorflow.app.flags.DEFINE_string",
"resnet_split.ResNet",
"tensorflow.global_variables",
"time.time",
"tensorflow.app.flags.DEFINE_float",
"os.path.join",
"tensorflow.global_variables_initializer",
"numpy.sum",
"numpy.zeros",
"datetime.datetime.now",
"cifar100.CIFAR100Runner"
] | [((228, 331), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""data_dir"""', '"""./cifar100/train_val_split"""', '"""Path to the CIFAR-100 data."""'], {}), "('data_dir', './cifar100/train_val_split',\n    'Path to the CIFAR-100 data.')\n", (254, 331), True, 'import tensorflow as tf\n'), ((332, 420), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_classes"""', '(100)', '"""Number of classes in the dataset."""'], {}), "('num_classes', 100,\n    'Number of classes in the dataset.')\n", (359, 420), True, 'import tensorflow as tf\n'), ((421, 506), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_test_instance"""', '(10000)', '"""Number of test images."""'], {}), "('num_test_instance', 10000,\n    'Number of test images.')\n", (448, 506), True, 'import tensorflow as tf\n'), ((532, 625), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(100)', '"""Number of images to process in a batch."""'], {}), "('batch_size', 100,\n    'Number of images to process in a batch.')\n", (559, 625), True, 'import tensorflow as tf\n'), ((626, 819), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_residual_units"""', '(2)', '"""Number of residual block per group.\n                            Total number of conv layers will be 6n+4"""'], {}), '(\'num_residual_units\', 2,\n    """Number of residual block per group.\n    Total number of conv layers will be 6n+4"""\n    )\n', (653, 819), True, 'import tensorflow as tf\n'), ((811, 874), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""k"""', '(8)', '"""Network width multiplier"""'], {}), "('k', 8, 'Network width multiplier')\n", (838, 874), True, 'import tensorflow as tf\n'), ((879, 950), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""ngroups1"""', '(1)', '"""Grouping number on logits"""'], {}), "('ngroups1', 1, 'Grouping number on logits')\n", (906, 950), True, 'import tensorflow as tf\n'), ((955, 1028), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""ngroups2"""', '(1)', '"""Grouping number on unit_3_x"""'], {}), "('ngroups2', 1, 'Grouping number on unit_3_x')\n", (982, 1028), True, 'import tensorflow as tf\n'), ((1033, 1106), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""ngroups3"""', '(1)', '"""Grouping number on unit_2_x"""'], {}), "('ngroups3', 1, 'Grouping number on unit_2_x')\n", (1060, 1106), True, 'import tensorflow as tf\n'), ((1141, 1233), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""l2_weight"""', '(0.0001)', '"""L2 loss weight applied all the weights"""'], {}), "('l2_weight', 0.0001,\n    'L2 loss weight applied all the weights')\n", (1166, 1233), True, 'import tensorflow as tf\n'), ((1234, 1313), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""momentum"""', '(0.9)', '"""The momentum of MomentumOptimizer"""'], {}), "('momentum', 0.9, 'The momentum of MomentumOptimizer')\n", (1259, 1313), True, 'import tensorflow as tf\n'), ((1318, 1387), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""initial_lr"""', '(0.1)', '"""Initial learning rate"""'], {}), "('initial_lr', 0.1, 'Initial learning rate')\n", (1343, 1387), True, 'import tensorflow as tf\n'), ((1392, 1501), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""lr_step_epoch"""', '"""80.0,120.0,160.0"""', '"""Epochs after which learning rate decays"""'], {}), "('lr_step_epoch', '80.0,120.0,160.0',\n    'Epochs after which learning rate decays')\n", (1418, 1501), True, 'import tensorflow as tf\n'), ((1502, 1574), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""lr_decay"""', '(0.1)', '"""Learning rate decay factor"""'], {}), "('lr_decay', 0.1, 'Learning rate decay factor')\n", (1527, 1574), True, 'import tensorflow as tf\n'), ((1579, 1649), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""finetune"""', '(False)', '"""Whether to finetune."""'], {}), "('finetune', False, 'Whether to finetune.')\n", (1606, 1649), True, 'import tensorflow as tf\n'), ((1682, 1786), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""basemodel"""', '"""./group/model.ckpt-199999"""', '"""Base model to load parameters"""'], {}), "('basemodel', './group/model.ckpt-199999',\n    'Base model to load parameters')\n", (1708, 1786), True, 'import tensorflow as tf\n'), ((1787, 1897), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""checkpoint"""', '"""./split/model.ckpt-149999"""', '"""Path to the model checkpoint file"""'], {}), "('checkpoint', './split/model.ckpt-149999',\n    'Path to the model checkpoint file')\n", (1813, 1897), True, 'import tensorflow as tf\n'), ((1898, 1994), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""output_file"""', '"""./split/eval.pkl"""', '"""Path to the result pkl file"""'], {}), "('output_file', './split/eval.pkl',\n    'Path to the result pkl file')\n", (1924, 1994), True, 'import tensorflow as tf\n'), ((1995, 2092), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""test_iter"""', '(100)', '"""Number of test batches during the evaluation"""'], {}), "('test_iter', 100,\n    'Number of test batches during the evaluation')\n", (2022, 2092), True, 'import tensorflow as tf\n'), ((2093, 2189), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""display"""', '(10)', '"""Number of iterations to display training info."""'], {}), "('display', 10,\n    'Number of iterations to display training info.')\n", (2120, 2189), True, 'import tensorflow as tf\n'), ((2190, 2287), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""gpu_fraction"""', '(0.95)', '"""The fraction of GPU memory to be allocated"""'], {}), "('gpu_fraction', 0.95,\n    'The fraction of GPU memory to be allocated')\n", (2215, 2287), True, 'import tensorflow as tf\n'), ((2288, 2386), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""log_device_placement"""', '(False)', '"""Whether to log device placement."""'], {}), "('log_device_placement', False,\n    'Whether to log device placement.')\n", (2315, 2386), True, 'import tensorflow as tf\n'), ((14063, 14081), 'numpy.max', 'np.max', (['merge_idxs'], {}), '(merge_idxs)\n', (14069, 14081), True, 'import numpy as np\n'), ((14314, 14333), 'numpy.array', 'np.array', (['temp_list'], {}), '(temp_list)\n', (14322, 14333), True, 'import numpy as np\n'), ((14456, 14474), 'numpy.max', 'np.max', (['merge_idxs'], {}), '(merge_idxs)\n', (14462, 14474), True, 'import numpy as np\n'), ((15042, 15054), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (15052, 15054), True, 'import tensorflow as tf\n'), ((3635, 3686), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)', 'name': '"""global_step"""'}), "(0, trainable=False, name='global_step')\n", (3646, 3686), True, 'import tensorflow as tf\n'), ((3801, 3837), 'os.path.join', 'os.path.join', (['FLAGS.data_dir', '"""test"""'], {}), "(FLAGS.data_dir, 'test')\n", (3813, 3837), False, 'import os\n'), ((4236, 4332), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[FLAGS.batch_size, cifar100.IMAGE_SIZE, cifar100.IMAGE_SIZE, 3]'], {}), '(tf.float32, [FLAGS.batch_size, cifar100.IMAGE_SIZE, cifar100\n    .IMAGE_SIZE, 3])\n', (4250, 4332), True, 'import tensorflow as tf\n'), ((4345, 4389), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[FLAGS.batch_size]'], {}), '(tf.int32, [FLAGS.batch_size])\n', (4359, 4389), True, 'import tensorflow as tf\n'), ((10176, 10506), 'resnet_split.HParams', 'resnet.HParams', ([], {'batch_size': 'FLAGS.batch_size', 'num_classes': 'FLAGS.num_classes', 'num_residual_units': 'FLAGS.num_residual_units', 'k': 'FLAGS.k', 'weight_decay': 'FLAGS.l2_weight', 'ngroups1': 'FLAGS.ngroups1', 'ngroups2': 'FLAGS.ngroups2', 'ngroups3': 'FLAGS.ngroups3', 'split_params': 'split_params', 'momentum': 'FLAGS.momentum', 'finetune': 'FLAGS.finetune'}), '(batch_size=FLAGS.batch_size, num_classes=FLAGS.num_classes,\n    num_residual_units=FLAGS.num_residual_units, k=FLAGS.k, weight_decay=\n    FLAGS.l2_weight, ngroups1=FLAGS.ngroups1, ngroups2=FLAGS.ngroups2,\n    ngroups3=FLAGS.ngroups3, split_params=split_params, momentum=FLAGS.\n    momentum, finetune=FLAGS.finetune)\n', (10190, 10506), True, 'import resnet_split as resnet\n'), ((10787, 10833), 'resnet_split.ResNet', 'resnet.ResNet', (['hp', 'images', 'labels', 'global_step'], {}), '(hp, images, labels, global_step)\n', (10800, 10833), True, 'import resnet_split as resnet\n'), ((11040, 11073), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (11071, 11073), True, 'import tensorflow as tf\n'), ((12232, 12296), 'numpy.zeros', 'np.zeros', (['(FLAGS.num_classes, FLAGS.num_classes)'], {'dtype': 'np.int32'}), '((FLAGS.num_classes, FLAGS.num_classes), dtype=np.int32)\n', (12240, 12296), True, 'import numpy as np\n'), ((3851, 3882), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""test_image"""'], {}), "('test_image')\n", (3868, 3882), True, 'import tensorflow as tf\n'), ((3912, 4024), 'cifar100.CIFAR100Runner', 'cifar100.CIFAR100Runner', (['test_dataset_path'], {'image_per_thread': '(1)', 'shuffle': '(False)', 'distort': '(False)', 'capacity': '(5000)'}), '(test_dataset_path, image_per_thread=1, shuffle=\n    False, distort=False, capacity=5000)\n', (3935, 4024), False, 'import cifar100\n'), ((4526, 4538), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (4534, 4538), False, 'import sys\n'), ((4880, 4925), 'tensorflow.train.NewCheckpointReader', 'tf.train.NewCheckpointReader', (['FLAGS.basemodel'], {}), '(FLAGS.basemodel)\n', (4908, 4925), True, 'import tensorflow as tf\n'), ((11739, 11760), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (11758, 11760), True, 'import tensorflow as tf\n'), ((12002, 12013), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (12010, 12013), False, 'import sys\n'), ((12447, 12458), 'time.time', 'time.time', ([], {}), '()\n', (12456, 12458), False, 'import time\n'), ((13923, 13946), 'cPickle.dump', 'pickle.dump', (['result', 'fd'], {}), '(result, fd)\n', (13934, 13946), True, 'import cPickle as pickle\n'), ((14282, 14302), 'numpy.sum', 'np.sum', (['temp'], {'axis': '(0)'}), '(temp, axis=0)\n', (14288, 14302), True, 'import numpy as np\n'), ((14670, 14690), 'numpy.concatenate', 'np.concatenate', (['temp'], {}), '(temp)\n', (14684, 14690), True, 'import numpy as np\n'), ((3588, 3598), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3596, 3598), True, 'import tensorflow as tf\n'), ((4753, 4783), 'numpy.argmax', 'np.argmax', (['split_alpha'], {'axis': '(0)'}), '(split_alpha, axis=0)\n', (4762, 4783), True, 'import numpy as np\n'), ((12691, 12702), 'time.time', 'time.time', ([], {}), '()\n', (12700, 12702), False, 'import time\n'), ((4808, 4829), 'numpy.where', 'np.where', (['(q_amax == i)'], {}), '(q_amax == i)\n', (4816, 4829), True, 'import numpy as np\n'), ((11199, 11264), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': 'FLAGS.gpu_fraction'}), '(per_process_gpu_memory_fraction=FLAGS.gpu_fraction)\n', (11212, 11264), True, 'import tensorflow as tf\n'), ((13274, 13288), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13286, 13288), False, 'from datetime import datetime\n')]
import numpy as np
import torch
import random
import time
import pdb
import sys
import skmultiflow.utils.constants as constants
from skmultiflow.ADCN.ADCN_classifier.utilsADCN import meanStdCalculator, plotPerformance, reduceLabeledData
from skmultiflow.ADCN.ADCN_classifier.model import cluster
from sklearn.metrics import precision_score, normalized_mutual_info_score, \
adjusted_rand_score, recall_score, f1_score, cohen_kappa_score,fowlkes_mallows_score
import progressbar
# imports from other modules in the package
from skmultiflow.ADCN.ADCN_classifier.evaluation_data_buffer import EvaluationDataBuffer
from skmultiflow.ADCN.ADCN_classifier.visualization.evaluation_visualizer import EvaluationVisualizer
def update_progress_bar(curr, total, steps):
progress = curr / total
progress_bar = round(progress * steps)
print('\r', '#' * progress_bar + '-' * (steps - progress_bar),
'[{:.0%}] '.format(progress), end='')
print("\n")
sys.stdout.flush()
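
# A quick illustrative call of the helper above: update_progress_bar(5, 20, 20)
# renders a bar like '#####--------------- [25%]' (5 of 20 segments filled).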
def ADCNmain(ADCNnet,dataStreams,
metrics,batch_size,
show_plot = True,
nLabeled = 1,
layerGrowing = True,
nodeEvolution = True,
clusterGrowing = True,
lwfLoss = True,
clusteringLoss = True,
trainingBatchSize = 16,
noOfEpoch = 1,
device = torch.device('cpu')):
# random seed control
# np.random.seed(0)
# torch.manual_seed(0)
# random.seed(0)
# performance metrics
# performanceMetrics = meanStd() # [accuracy,testingLoss,testingTime,trainingTime]
visualizer = None
Accuracy = []
ARI = []
NMI = []
F1 = []
Precision = []
Recall = []
Kappa = []
Gmean = []
testingTime = []
trainingTime = []
# testingLoss = []
prevBatchData = []
Y_pred = []
Y_true = []
Iter = []
# for figure
AccuracyHistory = []
nHiddenLayerHistory = []
nHiddenNodeHistory = []
nClusterHistory = []
# network evolution
# netEvolution = meanStdCalculator() # [nHiddenNode,nHiddenLayer]
nHiddenNode = []
nHiddenLayer = []
nCluster = []
layerCount = 0
# Metrics + Plot
data_dict = {}
for metric in metrics:
data_ids = ["mean", "current"]
if metric == "true_vs_predict":
data_ids = ["y_true", "y_pred"]
data_dict[metric] = data_ids
data_buffer = EvaluationDataBuffer(data_dict=data_dict)
if show_plot:
visualizer = EvaluationVisualizer(dataset_name=dataStreams.stream_name,
task_type="classification",
n_wait = 0,
n_models = 1,
metrics=metrics,
model_names="ADCN",
data_dict=data_dict)
# initialization phase
start_initialization_train = time.time()
ADCNnet.initialization(dataStreams.labeledData, layerCount,
batchSize = trainingBatchSize, device = device)
if nLabeled == 1:
allegianceData = dataStreams.labeledData.clone()
allegianceLabel = dataStreams.labeledLabel.clone()
elif nLabeled < 1:
# reduced labeled data
allegianceData, allegianceLabel = reduceLabeledData(dataStreams.labeledData.clone(),
dataStreams.labeledLabel.clone(),
nLabeled)
print('Number of allegiance data: ', allegianceData.shape[0])
# update allegiance
# ADCNnet.updateAllegiance(dataStreams.labeledData, dataStreams.labeledLabel)
end_initialization_train = time.time()
initialization_time = end_initialization_train - start_initialization_train
for i in range(len(ADCNnet.hiddenNodeHist)):
Iter.append(i)
nHiddenLayerHistory.append(ADCNnet.nHiddenLayer)
AccuracyHistory.append(0)
nHiddenNodeHistory = ADCNnet.hiddenNodeHist
nClusterHistory = ADCNnet.clusterHistory
## batch loop, handling unlabeled samples
# training is conducted with single epoch. The initialization of a new layer uses epoch.
# bar = progressbar.ProgressBar(dataStreams.nBatch)
# bar = progressbar.ProgressBar()
sample_id = 0
for iBatch in range(0,dataStreams.nBatch):
# if iBatch%10 == 0 or iBatch == 0:
# print(iBatch,'-th batch')
# load data
batchIdx = iBatch + 1
batchData = dataStreams.unlabeledData[(batchIdx-1)*dataStreams.batchSize:batchIdx*dataStreams.batchSize]
batchLabel = dataStreams.unlabeledLabel[(batchIdx-1)*dataStreams.batchSize:batchIdx*dataStreams.batchSize]
# update
start_train = time.time()
if iBatch > 0 and layerGrowing:
# drift detection
ADCNnet.driftDetection(batchData, previousBatchData)
if ADCNnet.driftStatus == 2:
# grow layer if drift is confirmed driftStatus == 2
ADCNnet.layerGrowing()
layerCount += 1
# initialization phase
ADCNnet.initialization(dataStreams.labeledData,
layerCount,
batchSize = trainingBatchSize,
device = device)
# training data preparation
previousBatchData = batchData.clone()
batchData, batchLabel = ADCNnet.trainingDataPreparation(batchData, batchLabel)
# training
if ADCNnet.driftStatus == 0 or ADCNnet.driftStatus == 2: # only train if it is stable or drift
ADCNnet.fit(batchData, epoch = noOfEpoch)
ADCNnet.updateNetProperties()
# update allegiance
ADCNnet.updateAllegiance(allegianceData, allegianceLabel)
end_train = time.time()
training_time = end_train - start_train
# testing
ADCNnet.testing(batchData, batchLabel)
sample_id = sample_id + batch_size
current_y_pred = ADCNnet.predictedLabel.tolist()
current_y_true = ADCNnet.trueClassLabel.tolist()
# update_metrics
for metric in metrics:
values = [[],[]]
if metric == "true_vs_predict":
values[0].append(ADCNnet.trueClassLabel)
values[1].append(ADCNnet.predictedLabel)
data_buffer.update_data(sample_id = sample_id,
metric_id=metric,
data_id='y_true',
value=values[0])
data_buffer.update_data(sample_id = sample_id,
metric_id=metric,
data_id='y_pred',
value=values[1])
elif metric == "accuracy":
values[0].append(np.mean(Accuracy)/100)
values[1].append(ADCNnet.accuracy/100)
data_buffer.update_data(sample_id = sample_id,
metric_id=metric,
data_id='mean',
value=values[0])
data_buffer.update_data(sample_id = sample_id,
metric_id=metric,
data_id='current',
value=values[1])
elif metric == "ari":
values[0].append(np.mean(ARI))
values[1].append(adjusted_rand_score(current_y_true, current_y_pred))
data_buffer.update_data(sample_id = sample_id,
metric_id=metric,
data_id='mean',
value=values[0])
data_buffer.update_data(sample_id = sample_id,
metric_id=metric,
data_id='current',
value=values[1])
elif metric == "nmi":
values[0].append(np.mean(NMI))
values[1].append(normalized_mutual_info_score(current_y_true, current_y_pred))
data_buffer.update_data(sample_id = sample_id,
metric_id=metric,
data_id='mean',
value=values[0])
                data_buffer.update_data(sample_id = sample_id,
                                        metric_id=metric,
                                        data_id='current',
                                        value=values[1])
elif metric == "f1":
values[0].append(np.mean(F1))
values[1].append(f1_score(current_y_true, current_y_pred,
average='weighted'))
data_buffer.update_data(sample_id = sample_id,
metric_id=metric,
data_id='mean',
value=values[0])
data_buffer.update_data(sample_id = sample_id,
metric_id=metric,
data_id='current',
value=values[1])
elif metric == "precision":
values[0].append(np.mean(Precision))
values[1].append(precision_score(current_y_true, current_y_pred,
average='weighted'))
data_buffer.update_data(sample_id = sample_id,
metric_id=metric,
data_id='mean',
value=values[0])
                data_buffer.update_data(sample_id = sample_id,
                                        metric_id=metric,
                                        data_id='current',
                                        value=values[1])
elif metric == "recall":
values[0].append(np.mean(Recall))
values[1].append(recall_score(current_y_true, current_y_pred,
average='weighted'))
data_buffer.update_data(sample_id = sample_id,
metric_id=metric,
data_id='mean',
value=values[0])
                data_buffer.update_data(sample_id = sample_id,
                                        metric_id=metric,
                                        data_id='current',
                                        value=values[1])
elif metric == "kappa":
values[0].append(np.mean(Kappa))
values[1].append(cohen_kappa_score(current_y_true, current_y_pred))
data_buffer.update_data(sample_id = sample_id,
metric_id=metric,
data_id='mean',
value=values[0])
                data_buffer.update_data(sample_id = sample_id,
                                        metric_id=metric,
                                        data_id='current',
                                        value=values[1])
elif metric == "gmean":
values[0].append(np.mean(Gmean))
values[1].append(fowlkes_mallows_score(current_y_true,current_y_pred))
data_buffer.update_data(sample_id = sample_id,
metric_id=metric,
data_id='mean',
value=values[0])
data_buffer.update_data(sample_id = sample_id,
metric_id=metric,
data_id='current',
value=values[1])
if show_plot and visualizer is not None:
visualizer.on_new_train_step(sample_id,data_buffer)
# if iBatch > 0:
Y_pred = Y_pred + ADCNnet.predictedLabel.tolist()
Y_true = Y_true + ADCNnet.trueClassLabel.tolist()
# if iBatch > 0:
# calculate performance
Accuracy.append(ADCNnet.accuracy)
ARI.append(adjusted_rand_score(current_y_true, current_y_pred))
NMI.append(normalized_mutual_info_score(current_y_true, current_y_pred))
F1.append(f1_score(current_y_true, current_y_pred,
average='weighted'))
Precision.append(precision_score(current_y_true, current_y_pred,
average='weighted'))
Recall.append(recall_score(current_y_true, current_y_pred,
average='weighted'))
Kappa.append(cohen_kappa_score(current_y_true, current_y_pred))
Gmean.append(fowlkes_mallows_score(current_y_true,current_y_pred))
AccuracyHistory.append(ADCNnet.accuracy)
testingTime.append(ADCNnet.testingTime)
trainingTime.append(training_time)
# print("Accuracy : "),
# print(ADCNnet.accuracy)
# testingLoss.append(ADCNnet.testingLoss)
# calculate network evolution
nHiddenLayer.append(ADCNnet.nHiddenLayer)
nHiddenNode.append(ADCNnet.nHiddenNode)
nCluster.append(ADCNnet.nCluster)
nHiddenLayerHistory.append(ADCNnet.nHiddenLayer)
nHiddenNodeHistory.append(ADCNnet.nHiddenNode)
nClusterHistory.append(ADCNnet.nCluster)
        Iter.append(iBatch + i + 1)  # offset by the i initialization iterations recorded above
#if iBatch%10 == 0 or iBatch == 0:
# print('Accuracy: ',np.mean(Accuracy))
update_progress_bar(iBatch,dataStreams.nBatch,20)
#print('\n')
#print('=== Performance result ===')
#print('Accuracy: ',np.mean(Accuracy),'(+/-)',np.std(Accuracy))
#print('ARI: ',adjusted_rand_score(Y_true, Y_pred))
#print('NMI: ',normalized_mutual_info_score(Y_true, Y_pred))
#print('F1 score: ',f1_score(Y_true, Y_pred, average='weighted'))
#print('Precision: ',precision_score(Y_true, Y_pred, average='weighted'))
#print('Recall: ',recall_score(Y_true, Y_pred, average='weighted'))
#print('Testing Time: ',np.mean(testingTime),'(+/-)',np.std(testingTime))
#print('Training Time: ',np.mean(trainingTime) + initialization_time,'(+/-)',np.std(trainingTime))
#print('Total Time: ',np.sum(trainingTime) + initialization_time)
# print('Testing Loss: ',np.mean(testingLoss),'(+/-)',np.std(testingLoss))
# print('\n')
# print('=== Average network evolution ===')
# print('Number of layer: ',np.mean(nHiddenLayer),'(+/-)',np.std(nHiddenLayer))
# print('Total hidden node: ',np.mean(nHiddenNode),'(+/-)',np.std(nHiddenNode))
# print('Number of cluster: ',np.mean(nCluster),'(+/-)',np.std(nCluster))
# print('\n')
# print('=== Final network structure ===')
# ADCNnet.getNetProperties()
    # print('\n')
# print('=== Precision Recall ===')
# print(classification_report(Y_true, Y_pred))
    # 0: accuracy
    # 1: ARI
    # 2: NMI
    # 3: f1_score
    # 4: precision_score
    # 5: recall_score
    # 6: training_time
    # 7: testingTime
    # 8: cohen_kappa_score
    # 9: fowlkes_mallows_score (gmean)
    # 10: nHiddenLayer
    # 11: nHiddenNode
    # 12: nCluster
allPerformance = [
np.mean(Accuracy),
adjusted_rand_score(Y_true, Y_pred),
normalized_mutual_info_score(Y_true, Y_pred),
f1_score(Y_true, Y_pred, average='weighted'),
precision_score(Y_true, Y_pred, average='weighted'),
recall_score(Y_true, Y_pred, average='weighted'),
        (np.mean(trainingTime) + initialization_time),
        np.mean(testingTime),
cohen_kappa_score(Y_true, Y_pred),
fowlkes_mallows_score(Y_true,Y_pred),
ADCNnet.nHiddenLayer,
ADCNnet.nHiddenNode,
ADCNnet.nCluster
]
performanceHistory = [
Iter,
AccuracyHistory,
nHiddenLayerHistory,
nHiddenNodeHistory,
nClusterHistory
]
    if show_plot and visualizer is not None:
        visualizer.hold()
return ADCNnet, performanceHistory, allPerformance
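
# A small convenience helper (not part of the original ADCN code) that names
# the entries of the `allPerformance` list returned by ADCNmain, following the
# index layout documented above. It only reindexes a plain list, so it is
# safe to run standalone.
def summarize_performance(all_performance):
    keys = ["accuracy", "ari", "nmi", "f1", "precision", "recall",
            "training_time", "testing_time", "kappa", "gmean",
            "n_hidden_layer", "n_hidden_node", "n_cluster"]
    return dict(zip(keys, all_performance))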
# ============================= Multi Task Learning =============================
def ADCNmainMT(ADCNnet, dataStreams,
nLabeled = 1, layerGrowing = True,
nodeEvolution = True, clusterGrowing = True,
lwfLoss = True, clusteringLoss = True,
trainingBatchSize = 16, noOfEpoch = 1,
device = torch.device('cpu')):
# for multi task learning
# random seed control
# np.random.seed(0)
# torch.manual_seed(0)
# random.seed(0)
# performance metrics
# performanceMetrics = meanStd() # [accuracy,testingLoss,testingTime,trainingTime]
Accuracy = []
testingTime = []
trainingTime = []
# testingLoss = []
prevBatchData = []
# multi task
currTask = 0
prevTask = 0
postTaskAcc = []
preTaskAcc = []
Y_pred = []
Y_true = []
Iter = []
# for figure
AccuracyHistory = []
nHiddenLayerHistory = []
nHiddenNodeHistory = []
nClusterHistory = []
# network evolution
# netEvolution = meanStdCalculator() # [nHiddenNode,nHiddenLayer]
nHiddenNode = []
nHiddenLayer = []
nCluster = []
layerCount = 0
# initiate network to handle the new task, trained on the initial data in the current task
start_initialization_train = time.time()
ADCNnet.initialization(dataStreams.labeledData[currTask], layerCount,
batchSize = trainingBatchSize, device = device)
end_initialization_train = time.time()
initialization_time = end_initialization_train - start_initialization_train
# collection of labeled data
if nLabeled == 1:
labeledData = dataStreams.labeledData[currTask]
labeledLabel = dataStreams.labeledLabel[currTask]
elif nLabeled < 1:
# reduced labeled data
labeledData, labeledLabel = reduceLabeledData(dataStreams.labeledData[currTask].clone(),
dataStreams.labeledLabel[currTask].clone(),
nLabeled)
print('Number of initial allegiance data: ', labeledData.shape[0])
for i in range(len(ADCNnet.hiddenNodeHist)):
Iter.append(i)
nHiddenLayerHistory.append(ADCNnet.nHiddenLayer)
AccuracyHistory.append(0)
nHiddenNodeHistory = ADCNnet.hiddenNodeHist
nClusterHistory = ADCNnet.clusterHistory
## batch loop, handling unlabeled samples
# training is conducted with single epoch. The initialization of a new layer uses epoch.
# bar = progressbar.ProgressBar(max_value=dataStreams.nBatch)
# bar = progressbar.ProgressBar()
batchIdx = 0
for iBatch in range(0, dataStreams.nBatch):
currTask = dataStreams.taskIndicator[iBatch]
# update
start_train = time.time()
if currTask != prevTask and currTask > prevTask:
batchIdx = 0
# store previous task model
ADCNnet.storeOldModel(prevTask)
# test on the prev task before entering curr task. For calculating BWT.
prevBatchData = dataStreams.unlabeledDataTest[prevTask]
prevBatchLabel = dataStreams.unlabeledLabelTest[prevTask]
ADCNnet.testing(prevBatchData, prevBatchLabel)
postTaskAcc.append(ADCNnet.accuracy)
# test on the current task after finishing prev task. For calculating FWT.
currBatchData = dataStreams.unlabeledDataTest[currTask]
currBatchLabel = dataStreams.unlabeledLabelTest[currTask]
# update allegiance
ADCNnet.updateAllegiance(labeledData, labeledLabel, randomTesting = True)
ADCNnet.testing(currBatchData, currBatchLabel)
preTaskAcc.append(ADCNnet.accuracy)
# initiate network to handle the new task, trained on the initial data in the current task
ADCNnet.fitCL(dataStreams.labeledData[currTask],
reconsLoss = True, epoch = 50,
unlabeled = False)
# augment the collection of unlabeled samples ***************
if nLabeled == 1:
labeledData = torch.cat((labeledData,dataStreams.labeledData[currTask]),0)
labeledLabel = torch.cat((labeledLabel,dataStreams.labeledLabel[currTask]),0)
elif nLabeled < 1:
reducedData, reducedLabel = reduceLabeledData(dataStreams.labeledData[currTask].clone(),
dataStreams.labeledLabel[currTask].clone(),
nLabeled)
labeledData = torch.cat((labeledData,reducedData),0)
labeledLabel = torch.cat((labeledLabel,reducedLabel),0)
print('Number of newly added allegiance data: ', reducedData.shape[0])
# load data
batchIdx = batchIdx + 1
batchData = dataStreams.unlabeledData[currTask][(batchIdx-1)*dataStreams.batchSize:batchIdx*dataStreams.batchSize]
batchLabel = dataStreams.unlabeledLabel[currTask][(batchIdx-1)*dataStreams.batchSize:batchIdx*dataStreams.batchSize]
print(batchIdx,'-th batch',currTask,'-th task')
if iBatch > 0 and layerGrowing:
# if batchData.shape[0] == 0:
# continue
# drift detection
ADCNnet.driftDetection(batchData, previousBatchData)
if ADCNnet.driftStatus == 2:
# grow layer if drift is confirmed driftStatus == 2
ADCNnet.layerGrowing()
layerCount += 1
# initialization phase
# need to augment data from previous task also?
ADCNnet.initialization(dataStreams.labeledData[currTask],
layerCount,
batchSize = trainingBatchSize,
device = device)
# ADCNnet.fitEndtoEnd(batchData)
# training data preparation
previousBatchData = batchData.clone()
batchData, batchLabel = ADCNnet.trainingDataPreparation(batchData, batchLabel)
# training
if ADCNnet.driftStatus == 0 or ADCNnet.driftStatus == 2: # only train if it is stable or drift
ADCNnet.fit(batchData, epoch = noOfEpoch)
ADCNnet.updateNetProperties()
# multi task training
if len(ADCNnet.ADCNold) > 0 and ADCNnet.regStrLWF != 0.0:
ADCNnet.fitCL(batchData)
# update allegiance
ADCNnet.updateAllegiance(labeledData, labeledLabel)
end_train = time.time()
training_time = end_train - start_train
# testing
ADCNnet.testing(batchData, batchLabel)
# if iBatch > 0:
Y_pred = Y_pred + ADCNnet.predictedLabel.tolist()
Y_true = Y_true + ADCNnet.trueClassLabel.tolist()
prevTask = dataStreams.taskIndicator[iBatch]
Accuracy.append(ADCNnet.accuracy)
AccuracyHistory.append(ADCNnet.accuracy)
testingTime.append(ADCNnet.testingTime)
trainingTime.append(training_time)
# calculate performance
if iBatch%10 == 0 or iBatch == 0:
print('Accuracy: ',np.mean(Accuracy))
# testingLoss.append(ADCNnet.testingLoss)
# calculate network evolution
nHiddenLayer.append(ADCNnet.nHiddenLayer)
nHiddenNode.append(ADCNnet.nHiddenNode)
nCluster.append(ADCNnet.nCluster)
nHiddenLayerHistory.append(ADCNnet.nHiddenLayer)
nHiddenNodeHistory.append(ADCNnet.nHiddenNode)
nClusterHistory.append(ADCNnet.nCluster)
        Iter.append(iBatch + i + 1)  # offset by the i initialization iterations recorded above
# bar.update(iBatch+1)
# final test, all tasks, except the last task. For calculating BWT
allTaskAccuracies = []
Y_predTasks = []
Y_trueTasks = []
for iTask in range(len(dataStreams.unlabeledData)-1):
ADCNnet.testing(dataStreams.unlabeledDataTest[iTask],
dataStreams.unlabeledLabelTest[iTask])
allTaskAccuracies.append(ADCNnet.accuracy)
Y_predTasks = Y_predTasks + ADCNnet.predictedLabel.tolist()
Y_trueTasks = Y_trueTasks + ADCNnet.trueClassLabel.tolist()
BWT = 1/(dataStreams.nTask-1)*(np.sum(allTaskAccuracies)-np.sum(postTaskAcc))
# test on the last task
ADCNnet.testing(dataStreams.unlabeledDataTest[len(dataStreams.unlabeledData)-1],
dataStreams.unlabeledLabelTest[len(dataStreams.unlabeledData)-1])
allTaskAccuracies.append(ADCNnet.accuracy)
# test with random initialization. For calculating FWT.
b_matrix = []
for iTask in range(1, len(dataStreams.unlabeledData)):
ADCNnet.randomTesting(dataStreams.unlabeledDataTest[iTask],
dataStreams.unlabeledLabelTest[iTask])
b_matrix.append(ADCNnet.accuracy)
FWT = 1/(dataStreams.nTask-1)*(np.sum(preTaskAcc)-np.sum(b_matrix))
print('\n')
print('=== Performance result ===')
print('Prequential Accuracy: ',np.mean(Accuracy),'(+/-)',np.std(Accuracy))
print('Prequential F1 score: ',f1_score(Y_true, Y_pred, average='weighted'))
print('Prequential ARI: ',adjusted_rand_score(Y_true, Y_pred))
print('Prequential NMI: ',normalized_mutual_info_score(Y_true, Y_pred))
print('Mean Task Accuracy: ',np.mean(allTaskAccuracies),'(+/-)',np.std(allTaskAccuracies))
print('All Task Accuracy: ',allTaskAccuracies)
print('Post Task Accuracy: ',postTaskAcc) # test results on the prev task before entering curr task.
print('Pre Task Accuracy: ',preTaskAcc) # test results on the current task after finishing prev task.
print('B Matrix: ',b_matrix) # test results on the current task after finishing prev task.
# print('F1 score: ',f1_score(Y_true, Y_pred, average='weighted'))
# print('Precision: ',precision_score(Y_true, Y_pred, average='weighted'))
# print('Recall: ',recall_score(Y_true, Y_pred, average='weighted'))
print('BWT: ',BWT)
print('FWT: ',FWT)
print('Testing Time: ',np.mean(testingTime),'(+/-)',np.std(testingTime))
print('Training Time: ',np.mean(trainingTime) + initialization_time,'(+/-)',np.std(trainingTime))
print('Total Time: ',np.sum(trainingTime) + initialization_time)
# print('Testing Loss: ',np.mean(testingLoss),'(+/-)',np.std(testingLoss))
print('\n')
print('=== Average network evolution ===')
print('Number of layer: ',np.mean(nHiddenLayer),'(+/-)',np.std(nHiddenLayer))
print('Total hidden node: ',np.mean(nHiddenNode),'(+/-)',np.std(nHiddenNode))
print('Number of cluster: ',np.mean(nCluster),'(+/-)',np.std(nCluster))
print('\n')
print('=== Final network structure ===')
ADCNnet.getNetProperties()
# 0: accuracy
# 1: all tasks accuracy
# 2: BWT
# 3: FWT
# 4: ARI
# 5: NMI
# 6: f1_score
# 7: precision_score
# 8: recall_score
# 9: training_time
# 10: testingTime
# 11: nHiddenLayer
# 12: nHiddenNode
# 13: nCluster
allPerformance = [
np.mean(Accuracy),
np.mean(allTaskAccuracies),
BWT,
FWT,
adjusted_rand_score(Y_true, Y_pred),
normalized_mutual_info_score(Y_true, Y_pred),
f1_score(Y_true, Y_pred, average='weighted'),
precision_score(Y_true, Y_pred, average='weighted'),
recall_score(Y_true, Y_pred, average='weighted'),
(np.mean(trainingTime) + initialization_time),
np.mean(testingTime),
ADCNnet.nHiddenLayer,
ADCNnet.nHiddenNode,
ADCNnet.nCluster
]
    # print('\n')
# print('=== Precision Recall ===')
# print(classification_report(Y_true, Y_pred))
performanceHistory = [
Iter,
AccuracyHistory,
nHiddenLayerHistory,
nHiddenNodeHistory,
nClusterHistory
]
return ADCNnet, performanceHistory, allPerformance
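
# Self-contained sketches of the transfer metrics computed in ADCNmainMT
# above (my reading of the BWT/FWT formulas; the function names are
# illustrative, not part of the original API).
def backward_transfer(final_accs, post_task_accs):
    # BWT: change in accuracy on old tasks after all training has finished,
    # averaged over the nTask-1 previously completed tasks.
    return (np.sum(final_accs) - np.sum(post_task_accs)) / len(post_task_accs)

def forward_transfer(pre_task_accs, random_init_accs):
    # FWT: accuracy gained on a not-yet-trained task relative to a randomly
    # initialised model, averaged over the nTask-1 later tasks.
    return (np.sum(pre_task_accs) - np.sum(random_init_accs)) / len(pre_task_accs)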
| [
"numpy.mean",
"skmultiflow.ADCN.ADCN_classifier.visualization.evaluation_visualizer.EvaluationVisualizer",
"sklearn.metrics.f1_score",
"skmultiflow.ADCN.ADCN_classifier.evaluation_data_buffer.EvaluationDataBuffer",
"sklearn.metrics.adjusted_rand_score",
"sklearn.metrics.cohen_kappa_score",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"numpy.sum",
"sklearn.metrics.fowlkes_mallows_score",
"numpy.std",
"sklearn.metrics.normalized_mutual_info_score",
"sys.stdout.flush",
"time.time",
"torch.cat",
"torch.device"
] | [((970, 988), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (986, 988), False, 'import sys\n'), ((1361, 1380), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1373, 1380), False, 'import torch\n'), ((3028, 3039), 'time.time', 'time.time', ([], {}), '()\n', (3037, 3039), False, 'import time\n'), ((3840, 3851), 'time.time', 'time.time', ([], {}), '()\n', (3849, 3851), False, 'import time\n'), ((16657, 16676), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (16669, 16676), False, 'import torch\n'), ((17623, 17634), 'time.time', 'time.time', ([], {}), '()\n', (17632, 17634), False, 'import time\n'), ((17818, 17829), 'time.time', 'time.time', ([], {}), '()\n', (17827, 17829), False, 'import time\n'), ((2458, 2499), 'skmultiflow.ADCN.ADCN_classifier.evaluation_data_buffer.EvaluationDataBuffer', 'EvaluationDataBuffer', ([], {'data_dict': 'data_dict'}), '(data_dict=data_dict)\n', (2478, 2499), False, 'from skmultiflow.ADCN.ADCN_classifier.evaluation_data_buffer import EvaluationDataBuffer\n'), ((2539, 2715), 'skmultiflow.ADCN.ADCN_classifier.visualization.evaluation_visualizer.EvaluationVisualizer', 'EvaluationVisualizer', ([], {'dataset_name': 'dataStreams.stream_name', 'task_type': '"""classification"""', 'n_wait': '(0)', 'n_models': '(1)', 'metrics': 'metrics', 'model_names': '"""ADCN"""', 'data_dict': 'data_dict'}), "(dataset_name=dataStreams.stream_name, task_type=\n 'classification', n_wait=0, n_models=1, metrics=metrics, model_names=\n 'ADCN', data_dict=data_dict)\n", (2559, 2715), False, 'from skmultiflow.ADCN.ADCN_classifier.visualization.evaluation_visualizer import EvaluationVisualizer\n'), ((4896, 4907), 'time.time', 'time.time', ([], {}), '()\n', (4905, 4907), False, 'import time\n'), ((6021, 6032), 'time.time', 'time.time', ([], {}), '()\n', (6030, 6032), False, 'import time\n'), ((15512, 15529), 'numpy.mean', 'np.mean', (['Accuracy'], {}), '(Accuracy)\n', (15519, 15529), True, 'import numpy as np\n'), ((15539, 15574), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['Y_true', 'Y_pred'], {}), '(Y_true, Y_pred)\n', (15558, 15574), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((15584, 15628), 'sklearn.metrics.normalized_mutual_info_score', 'normalized_mutual_info_score', (['Y_true', 'Y_pred'], {}), '(Y_true, Y_pred)\n', (15612, 15628), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((15638, 15682), 'sklearn.metrics.f1_score', 'f1_score', (['Y_true', 'Y_pred'], {'average': '"""weighted"""'}), "(Y_true, Y_pred, average='weighted')\n", (15646, 15682), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((15692, 15743), 'sklearn.metrics.precision_score', 'precision_score', (['Y_true', 'Y_pred'], {'average': '"""weighted"""'}), "(Y_true, Y_pred, average='weighted')\n", (15707, 15743), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((15753, 15801), 'sklearn.metrics.recall_score', 'recall_score', (['Y_true', 'Y_pred'], {'average': '"""weighted"""'}), "(Y_true, Y_pred, average='weighted')\n", (15765, 15801), False, 'from 
sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((15857, 15877), 'numpy.mean', 'np.mean', (['testingTime'], {}), '(testingTime)\n', (15864, 15877), True, 'import numpy as np\n'), ((15887, 15920), 'sklearn.metrics.cohen_kappa_score', 'cohen_kappa_score', (['Y_true', 'Y_pred'], {}), '(Y_true, Y_pred)\n', (15904, 15920), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((15930, 15967), 'sklearn.metrics.fowlkes_mallows_score', 'fowlkes_mallows_score', (['Y_true', 'Y_pred'], {}), '(Y_true, Y_pred)\n', (15951, 15967), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((19140, 19151), 'time.time', 'time.time', ([], {}), '()\n', (19149, 19151), False, 'import time\n'), ((23018, 23029), 'time.time', 'time.time', ([], {}), '()\n', (23027, 23029), False, 'import time\n'), ((25461, 25478), 'numpy.mean', 'np.mean', (['Accuracy'], {}), '(Accuracy)\n', (25468, 25478), True, 'import numpy as np\n'), ((25487, 25503), 'numpy.std', 'np.std', (['Accuracy'], {}), '(Accuracy)\n', (25493, 25503), True, 'import numpy as np\n'), ((25540, 25584), 'sklearn.metrics.f1_score', 'f1_score', (['Y_true', 'Y_pred'], {'average': '"""weighted"""'}), "(Y_true, Y_pred, average='weighted')\n", (25548, 25584), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((25616, 25651), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['Y_true', 'Y_pred'], {}), '(Y_true, Y_pred)\n', (25635, 25651), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((25683, 25727), 'sklearn.metrics.normalized_mutual_info_score', 'normalized_mutual_info_score', (['Y_true', 'Y_pred'], {}), '(Y_true, Y_pred)\n', (25711, 25727), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((25762, 25788), 'numpy.mean', 'np.mean', (['allTaskAccuracies'], {}), '(allTaskAccuracies)\n', (25769, 25788), True, 'import numpy as np\n'), ((25797, 25822), 'numpy.std', 'np.std', (['allTaskAccuracies'], {}), '(allTaskAccuracies)\n', (25803, 25822), True, 'import numpy as np\n'), ((26499, 26519), 'numpy.mean', 'np.mean', (['testingTime'], {}), '(testingTime)\n', (26506, 26519), True, 'import numpy as np\n'), ((26528, 26547), 'numpy.std', 'np.std', (['testingTime'], {}), '(testingTime)\n', (26534, 26547), True, 'import numpy as np\n'), ((26629, 26649), 'numpy.std', 'np.std', (['trainingTime'], {}), '(trainingTime)\n', (26635, 26649), True, 'import numpy as np\n'), ((26897, 26918), 'numpy.mean', 'np.mean', (['nHiddenLayer'], {}), '(nHiddenLayer)\n', (26904, 26918), True, 'import numpy as np\n'), ((26927, 26947), 'numpy.std', 'np.std', (['nHiddenLayer'], {}), '(nHiddenLayer)\n', (26933, 26947), True, 'import numpy as np\n'), ((26981, 27001), 'numpy.mean', 'np.mean', (['nHiddenNode'], {}), '(nHiddenNode)\n', (26988, 27001), True, 'import numpy as np\n'), ((27010, 27029), 'numpy.std', 'np.std', (['nHiddenNode'], {}), '(nHiddenNode)\n', (27016, 27029), 
True, 'import numpy as np\n'), ((27063, 27080), 'numpy.mean', 'np.mean', (['nCluster'], {}), '(nCluster)\n', (27070, 27080), True, 'import numpy as np\n'), ((27089, 27105), 'numpy.std', 'np.std', (['nCluster'], {}), '(nCluster)\n', (27095, 27105), True, 'import numpy as np\n'), ((27505, 27522), 'numpy.mean', 'np.mean', (['Accuracy'], {}), '(Accuracy)\n', (27512, 27522), True, 'import numpy as np\n'), ((27532, 27558), 'numpy.mean', 'np.mean', (['allTaskAccuracies'], {}), '(allTaskAccuracies)\n', (27539, 27558), True, 'import numpy as np\n'), ((27594, 27629), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['Y_true', 'Y_pred'], {}), '(Y_true, Y_pred)\n', (27613, 27629), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((27639, 27683), 'sklearn.metrics.normalized_mutual_info_score', 'normalized_mutual_info_score', (['Y_true', 'Y_pred'], {}), '(Y_true, Y_pred)\n', (27667, 27683), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((27693, 27737), 'sklearn.metrics.f1_score', 'f1_score', (['Y_true', 'Y_pred'], {'average': '"""weighted"""'}), "(Y_true, Y_pred, average='weighted')\n", (27701, 27737), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((27747, 27798), 'sklearn.metrics.precision_score', 'precision_score', (['Y_true', 'Y_pred'], {'average': '"""weighted"""'}), "(Y_true, Y_pred, average='weighted')\n", (27762, 27798), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((27808, 27856), 'sklearn.metrics.recall_score', 'recall_score', (['Y_true', 'Y_pred'], {'average': '"""weighted"""'}), "(Y_true, Y_pred, average='weighted')\n", (27820, 27856), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((27921, 27941), 'numpy.mean', 'np.mean', (['testingTime'], {}), '(testingTime)\n', (27928, 27941), True, 'import numpy as np\n'), ((12476, 12527), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['current_y_true', 'current_y_pred'], {}), '(current_y_true, current_y_pred)\n', (12495, 12527), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((12548, 12608), 'sklearn.metrics.normalized_mutual_info_score', 'normalized_mutual_info_score', (['current_y_true', 'current_y_pred'], {}), '(current_y_true, current_y_pred)\n', (12576, 12608), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((12628, 12688), 'sklearn.metrics.f1_score', 'f1_score', (['current_y_true', 'current_y_pred'], {'average': '"""weighted"""'}), "(current_y_true, current_y_pred, average='weighted')\n", (12636, 12688), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((12742, 12809), 'sklearn.metrics.precision_score', 'precision_score', 
(['current_y_true', 'current_y_pred'], {'average': '"""weighted"""'}), "(current_y_true, current_y_pred, average='weighted')\n", (12757, 12809), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((12874, 12938), 'sklearn.metrics.recall_score', 'recall_score', (['current_y_true', 'current_y_pred'], {'average': '"""weighted"""'}), "(current_y_true, current_y_pred, average='weighted')\n", (12886, 12938), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((12996, 13045), 'sklearn.metrics.cohen_kappa_score', 'cohen_kappa_score', (['current_y_true', 'current_y_pred'], {}), '(current_y_true, current_y_pred)\n', (13013, 13045), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((13068, 13121), 'sklearn.metrics.fowlkes_mallows_score', 'fowlkes_mallows_score', (['current_y_true', 'current_y_pred'], {}), '(current_y_true, current_y_pred)\n', (13089, 13121), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((15812, 15833), 'numpy.mean', 'np.mean', (['trainingTime'], {}), '(trainingTime)\n', (15819, 15833), True, 'import numpy as np\n'), ((24679, 24704), 'numpy.sum', 'np.sum', (['allTaskAccuracies'], {}), '(allTaskAccuracies)\n', (24685, 24704), True, 'import numpy as np\n'), ((24705, 24724), 'numpy.sum', 'np.sum', (['postTaskAcc'], {}), '(postTaskAcc)\n', (24711, 24724), True, 'import numpy as np\n'), ((25332, 25350), 'numpy.sum', 'np.sum', (['preTaskAcc'], {}), '(preTaskAcc)\n', (25338, 25350), True, 'import numpy as np\n'), ((25351, 25367), 'numpy.sum', 'np.sum', (['b_matrix'], {}), '(b_matrix)\n', (25357, 25367), True, 'import numpy as np\n'), ((26577, 26598), 'numpy.mean', 'np.mean', (['trainingTime'], {}), '(trainingTime)\n', (26584, 26598), True, 'import numpy as np\n'), ((26676, 26696), 'numpy.sum', 'np.sum', (['trainingTime'], {}), '(trainingTime)\n', (26682, 26696), True, 'import numpy as np\n'), ((27867, 27888), 'numpy.mean', 'np.mean', (['trainingTime'], {}), '(trainingTime)\n', (27874, 27888), True, 'import numpy as np\n'), ((20511, 20573), 'torch.cat', 'torch.cat', (['(labeledData, dataStreams.labeledData[currTask])', '(0)'], {}), '((labeledData, dataStreams.labeledData[currTask]), 0)\n', (20520, 20573), False, 'import torch\n'), ((20603, 20667), 'torch.cat', 'torch.cat', (['(labeledLabel, dataStreams.labeledLabel[currTask])', '(0)'], {}), '((labeledLabel, dataStreams.labeledLabel[currTask]), 0)\n', (20612, 20667), False, 'import torch\n'), ((23628, 23645), 'numpy.mean', 'np.mean', (['Accuracy'], {}), '(Accuracy)\n', (23635, 23645), True, 'import numpy as np\n'), ((21012, 21052), 'torch.cat', 'torch.cat', (['(labeledData, reducedData)', '(0)'], {}), '((labeledData, reducedData), 0)\n', (21021, 21052), False, 'import torch\n'), ((21082, 21124), 'torch.cat', 'torch.cat', (['(labeledLabel, reducedLabel)', '(0)'], {}), '((labeledLabel, reducedLabel), 0)\n', (21091, 21124), False, 'import torch\n'), ((7095, 7112), 'numpy.mean', 'np.mean', (['Accuracy'], {}), '(Accuracy)\n', (7102, 7112), True, 'import numpy as np\n'), ((7714, 7726), 'numpy.mean', 'np.mean', (['ARI'], {}), '(ARI)\n', (7721, 7726), True, 
'import numpy as np\n'), ((7761, 7812), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['current_y_true', 'current_y_pred'], {}), '(current_y_true, current_y_pred)\n', (7780, 7812), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((8353, 8365), 'numpy.mean', 'np.mean', (['NMI'], {}), '(NMI)\n', (8360, 8365), True, 'import numpy as np\n'), ((8400, 8460), 'sklearn.metrics.normalized_mutual_info_score', 'normalized_mutual_info_score', (['current_y_true', 'current_y_pred'], {}), '(current_y_true, current_y_pred)\n', (8428, 8460), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((8938, 8949), 'numpy.mean', 'np.mean', (['F1'], {}), '(F1)\n', (8945, 8949), True, 'import numpy as np\n'), ((8984, 9044), 'sklearn.metrics.f1_score', 'f1_score', (['current_y_true', 'current_y_pred'], {'average': '"""weighted"""'}), "(current_y_true, current_y_pred, average='weighted')\n", (8992, 9044), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((9634, 9652), 'numpy.mean', 'np.mean', (['Precision'], {}), '(Precision)\n', (9641, 9652), True, 'import numpy as np\n'), ((9687, 9754), 'sklearn.metrics.precision_score', 'precision_score', (['current_y_true', 'current_y_pred'], {'average': '"""weighted"""'}), "(current_y_true, current_y_pred, average='weighted')\n", (9702, 9754), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((10285, 10300), 'numpy.mean', 'np.mean', (['Recall'], {}), '(Recall)\n', (10292, 10300), True, 'import numpy as np\n'), ((10335, 10399), 'sklearn.metrics.recall_score', 'recall_score', (['current_y_true', 'current_y_pred'], {'average': '"""weighted"""'}), "(current_y_true, current_y_pred, average='weighted')\n", (10347, 10399), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((10926, 10940), 'numpy.mean', 'np.mean', (['Kappa'], {}), '(Kappa)\n', (10933, 10940), True, 'import numpy as np\n'), ((10975, 11024), 'sklearn.metrics.cohen_kappa_score', 'cohen_kappa_score', (['current_y_true', 'current_y_pred'], {}), '(current_y_true, current_y_pred)\n', (10992, 11024), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n'), ((11505, 11519), 'numpy.mean', 'np.mean', (['Gmean'], {}), '(Gmean)\n', (11512, 11519), True, 'import numpy as np\n'), ((11554, 11607), 'sklearn.metrics.fowlkes_mallows_score', 'fowlkes_mallows_score', (['current_y_true', 'current_y_pred'], {}), '(current_y_true, current_y_pred)\n', (11575, 11607), False, 'from sklearn.metrics import precision_score, normalized_mutual_info_score, adjusted_rand_score, recall_score, f1_score, cohen_kappa_score, fowlkes_mallows_score\n')] |
from __future__ import absolute_import
import torch as t
from torch import nn
from torchvision.models import vgg16
from torchvision.ops import RoIPool
from RPN import RegionProposalNetwork
from faster_rcnn import FasterRCNN
import array_tools as at
def VGG16():
    model = vgg16(pretrained=True)  # pretrained defaults to False; True loads the ImageNet weights
    features = list(model.features)[:-1]  # keep 30 of the 31 feature layers (drop the final max pooling)
    classifier = list(model.classifier)[:-1]  # drop the final 1000-class linear layer
    for layer in features[:10]:  # freeze the first ten layers
        for p in layer.parameters():
            p.requires_grad = False
return nn.Sequential(*features), nn.Sequential(*classifier)
# feature =
# [Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# ReLU(inplace=True),
# Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# ReLU(inplace=True),
# MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),
# Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# ReLU(inplace=True),
# Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# ReLU(inplace=True),
# MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),
# Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# ReLU(inplace=True),
# Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# ReLU(inplace=True),
# Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# ReLU(inplace=True),
# MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),
# Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# ReLU(inplace=True),
# Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# ReLU(inplace=True),
# Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# ReLU(inplace=True),
# MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),
# Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# ReLU(inplace=True),
# Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# ReLU(inplace=True),
# Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
# ReLU(inplace=True),
# classifier=
# Sequential(
# (0): Linear(in_features=25088, out_features=4096, bias=True)
# (1): ReLU(inplace=True)
# (2): Dropout(p=0.5, inplace=False)
# (3): Linear(in_features=4096, out_features=4096, bias=True)
# (4): ReLU(inplace=True)
# (5): Dropout(p=0.5, inplace=False)
# (6): Linear(in_features=4096, out_features=1000, bias=True)
# )
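
# Shape bookkeeping for the comments above: the RoI head below pools each
# region to 7x7 over the extractor's 512 output channels, and
# 512 * 7 * 7 = 25088 matches the in_features of the classifier's first
# Linear layer once the pooled map is flattened.
assert 512 * 7 * 7 == 25088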
class FasterRCNNVGG16(FasterRCNN):
    def __init__(self, n_class=20, feat_stride=16,  # n_class excludes background
ratios=[0.5, 1, 2],
anchor_scales=[8, 16, 32]):
extractor, classifier = VGG16()
rpn = RegionProposalNetwork(
512, 512,
ratios=ratios,
anchor_scales=anchor_scales,
feat_stride=feat_stride
)
head = RoiHead(
            n_class=n_class + 1,  # +1 for the background class
roi_size=7,
spatial_scale=(1. / feat_stride),
classifier=classifier
)
super(FasterRCNNVGG16, self).__init__(
extractor,
rpn,
head
)
class RoiHead(nn.Module):
    # n_class here includes background (21 classes)
def __init__(self, n_class, roi_size, spatial_scale, classifier):
super(RoiHead, self).__init__()
self.classifier = classifier
self.loc = nn.Linear(4096, n_class * 4)
self.scores = nn.Linear(4096, n_class)
normal_init(self.loc, 0, 0.001)
normal_init(self.scores, 0, 0.001)
self.n_class = n_class
self.roi_size = roi_size
        self.spatial_scale = spatial_scale  # scale factor from image to feature-map coordinates
self.roi = RoIPool((self.roi_size, self.roi_size), self.spatial_scale)
    # RoI pooling proceeds as follows:
    # (1) map each RoI from the input image onto the corresponding region of
    #     the feature map;
    # (2) split the mapped region into equally sized sections (one section
    #     per output cell);
    # (3) max-pool each section.
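    # Worked example (illustrative numbers): with feat_stride = 16,
    # spatial_scale = 1/16, so an image-space box (0, 0, 224, 224) lands on a
    # 14x14 feature-map patch, which max pooling then reduces to the fixed
    # roi_size of 7x7.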
def forward(self, x, rois):
        index = t.zeros(rois.shape[0])  # batch size = 1, so every RoI gets batch index 0
        index = at.totensor(index).float()
        rois = at.totensor(rois).float()
        rois_with_index = t.cat([index[:, None], rois], dim=1)  # index[:, None] makes the index a column
        rois_with_index = rois_with_index[:, [0, 2, 1, 4, 3]]  # (idx, y1, x1, y2, x2) -> (idx, x1, y1, x2, y2)
        # torchvision.ops.roi_pool(input, boxes, output_size, spatial_scale=1.0)
        # input (Tensor[N, C, H, W]) - the input feature map
        # boxes (Tensor[K, 5] or List[Tensor[L, 4]]) - box coordinates, either
        #     (x1, y1, x2, y2) lists or rows of (batch_index, x1, y1, x2, y2)
        # output_size (int or Tuple[int, int]) - output size as (height, width)
        # spatial_scale (float) - factor mapping input coordinates to box
        #     coordinates; default 1.0
        pool = self.roi(x, rois_with_index)  # (128 RoIs at train / 300 at eval, 512, 7, 7)
pool = pool.view(pool.size(0), -1)
fc7 = self.classifier(pool)
roi_loc = self.loc(fc7)
roi_scores = self.scores(fc7)
return roi_loc, roi_scores
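
# A tiny self-contained sanity check of the RoIPool calling convention used
# above (shapes and numbers are illustrative, not from a real run):
if __name__ == "__main__":
    feat = t.randn(1, 512, 50, 50)               # one 512-channel feature map
    box = t.tensor([[0., 0., 0., 160., 160.]])  # (batch_index, x1, y1, x2, y2)
    demo_pool = RoIPool((7, 7), 1. / 16)
    print(demo_pool(feat, box).shape)          # torch.Size([1, 512, 7, 7])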
def normal_init(m, mean, std):
m.weight.data.normal_(mean, std)
    m.bias.data.zero_()
| [
"array_tools.totensor",
"torch.nn.Sequential",
"torchvision.ops.RoIPool",
"RPN.RegionProposalNetwork",
"torch.nn.Linear",
"torchvision.models.vgg16",
"torch.zeros",
"torch.cat"
] | [((288, 310), 'torchvision.models.vgg16', 'vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (293, 310), False, 'from torchvision.models import vgg16\n'), ((595, 619), 'torch.nn.Sequential', 'nn.Sequential', (['*features'], {}), '(*features)\n', (608, 619), False, 'from torch import nn\n'), ((621, 647), 'torch.nn.Sequential', 'nn.Sequential', (['*classifier'], {}), '(*classifier)\n', (634, 647), False, 'from torch import nn\n'), ((2846, 2950), 'RPN.RegionProposalNetwork', 'RegionProposalNetwork', (['(512)', '(512)'], {'ratios': 'ratios', 'anchor_scales': 'anchor_scales', 'feat_stride': 'feat_stride'}), '(512, 512, ratios=ratios, anchor_scales=anchor_scales,\n feat_stride=feat_stride)\n', (2867, 2950), False, 'from RPN import RegionProposalNetwork\n'), ((3531, 3559), 'torch.nn.Linear', 'nn.Linear', (['(4096)', '(n_class * 4)'], {}), '(4096, n_class * 4)\n', (3540, 3559), False, 'from torch import nn\n'), ((3583, 3607), 'torch.nn.Linear', 'nn.Linear', (['(4096)', 'n_class'], {}), '(4096, n_class)\n', (3592, 3607), False, 'from torch import nn\n'), ((3833, 3892), 'torchvision.ops.RoIPool', 'RoIPool', (['(self.roi_size, self.roi_size)', 'self.spatial_scale'], {}), '((self.roi_size, self.roi_size), self.spatial_scale)\n', (3840, 3892), False, 'from torchvision.ops import RoIPool\n'), ((4126, 4148), 'torch.zeros', 't.zeros', (['rois.shape[0]'], {}), '(rois.shape[0])\n', (4133, 4148), True, 'import torch as t\n'), ((4283, 4319), 'torch.cat', 't.cat', (['[index[:, None], rois]'], {'dim': '(1)'}), '([index[:, None], rois], dim=1)\n', (4288, 4319), True, 'import torch as t\n'), ((4187, 4205), 'array_tools.totensor', 'at.totensor', (['index'], {}), '(index)\n', (4198, 4205), True, 'import array_tools as at\n'), ((4230, 4247), 'array_tools.totensor', 'at.totensor', (['rois'], {}), '(rois)\n', (4241, 4247), True, 'import array_tools as at\n')] |
import os
import os.path as op
import pickle
import pandas as pd
import mne
too_noisy = ["CC220352"]
def get_params(dataset):
if os.path.exists("/home/parietal/"):
subjects_dir = get_subjects_dir(dataset)
data_path = "~/data/%s/" % dataset
data_path = op.expanduser(data_path)
subject = get_subjects_list(dataset)[0]
info_fname = get_raw_fname(dataset, subject)
else:
data_path = "~/Dropbox/neuro_transport/code/"
data_path += "mtw_experiments/meg/%s/" % dataset
data_path = op.expanduser(data_path)
subjects_dir = data_path + "subjects/"
info_fname = "/Users/hichamjanati/Documents/work/mne-python/mne/"
info_fname += "io/tests/data/test_raw.fif"
info = mne.io.read_info(info_fname, verbose=False)
meg_ind = mne.pick_types(info, eeg=False)
info = mne.pick_info(info, meg_ind)
grad_ind = mne.pick_types(info, meg="grad")
params = dict(data_path=data_path, grad_indices=grad_ind, info=info,
subjects_dir=subjects_dir)
return params
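
# For reference, the dict returned above has four keys (assuming the raw or
# test info file is readable on the current machine):
#   data_path     - expanded dataset root
#   info          - MEG-only mne.Info object
#   grad_indices  - gradiometer channel picks within that info
#   subjects_dir  - FreeSurfer subjects directory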
def get_subjects_dir(dataset_name):
if dataset_name == "camcan":
path = "/storage/store/data/camcan-mne/freesurfer/"
elif dataset_name == "ds117":
path = "/storage/store/work/agramfort/mne-biomag-group-demo/"
path += "subjects/"
else:
raise ValueError("Unknown dataset %s." % dataset_name)
return path
def get_trans_fname(dataset_name, subject):
if dataset_name == "camcan":
path = "/storage/store/data/camcan-mne/trans/"
path += "sub-%s-trans.fif" % subject
elif dataset_name == "ds117":
path = "/storage/store/work/agramfort/mne-biomag-group-demo/"
path += "ds117/%s/MEG/%s-trans.fif" % (subject, subject)
else:
raise ValueError("Unknown dataset %s." % dataset_name)
return path
def get_bem_fname(dataset_name, subject):
if dataset_name == "camcan":
path = "/storage/store/data/camcan-mne/freesurfer/"
path += "%s/bem/%s-meg-bem.fif" % (subject, subject)
elif dataset_name == "ds117":
path = "/storage/store/work/agramfort/mne-biomag-group-demo/"
path += "subjects/%s/bem/%s-5120-bem-sol.fif" % (subject, subject)
else:
raise ValueError("Unknown dataset %s." % dataset_name)
return path
def get_raw_fname(dataset_name, subject, task="passive"):
if dataset_name == "camcan":
path = "/storage/store/data/camcan/camcan47/cc700/meg/pipeline/"
path += "release004/data/aamod_meg_get_fif_00001/%s/%s/" % (subject,
task)
path += "%s_raw.fif" % task
elif dataset_name == "ds117":
path = "/storage/store/work/agramfort/mne-biomag-group-demo/"
path += "ds117/%s/MEG/run_01_raw.fif" % subject
else:
raise ValueError("Unknown dataset %s." % dataset_name)
return path
def get_ave_fname(dataset_name, subject, task="passive"):
if dataset_name == "camcan":
path = "/home/parietal/hjanati/data/camcan/meg/"
path += "%s/%s_stim_sensors-ave.fif" % (subject, task)
elif dataset_name == "ds117":
path = "/storage/store/work/agramfort/mne-biomag-group-demo/"
path += "MEG/%s/%s_highpass-NoneHz-ave.fif" % (subject, subject)
else:
raise ValueError("Unknown dataset %s." % dataset_name)
return path
def get_cov_fname(dataset_name, subject, task="passive"):
if dataset_name == "camcan":
path = "/home/parietal/hjanati/data/camcan/meg/"
path += "%s/%s_stim_sensors-cov.fif" % (subject, task)
elif dataset_name == "ds117":
path = "/storage/store/work/agramfort/mne-biomag-group-demo/"
path += "MEG/%s/%s_highpass-NoneHz-cov.fif" % (subject, subject)
else:
raise ValueError("Unknown dataset %s." % dataset_name)
return path
def get_epo_fname(dataset_name, subject, task="passive"):
if dataset_name == "camcan":
path = "/home/parietal/hjanati/data/camcan/meg/"
path += "%s/%s_stim_sensors-epo.fif" % (subject, task)
elif dataset_name == "ds117":
path = "/storage/store/work/agramfort/mne-biomag-group-demo/"
path += "MEG/%s/%s_highpass-NoneHz-epo.fif" % (subject, subject)
else:
raise ValueError("Unknown dataset %s." % dataset_name)
return path
def get_fwd_fname(dataset_name, subject, resolution="ico4"):
path = "/home/parietal/hjanati/data/%s/bem/" % dataset_name
path += "%s-%s-fwd.fif" % (subject, resolution)
return path
def get_subjects_list(dataset_name, age_min=0, age_max=30, simu=False):
if os.path.exists("/home/parietal/"):
if dataset_name == "camcan":
df = pd.read_csv("/home/parietal/hjanati/data/camcan/age.csv")
path = "/storage/store/data/camcan-mne/trans/"
df = df[(df.age < age_max) & (df.age > age_min)]
all_subjects = list(df.Observations)
subjects = []
for subject in all_subjects:
fname0 = get_raw_fname(dataset_name, subject)
fname1 = get_bem_fname(dataset_name, subject)
fname2 = path + "../freesurfer/%s/surf/lh.white" % subject
fname3 = get_trans_fname(dataset_name, subject)
fname4 = get_ave_fname(dataset_name, subject)
check0 = os.path.exists(fname0)
check1 = os.path.exists(fname1)
check2 = os.path.exists(fname2)
check3 = os.path.exists(fname3)
check4 = os.path.exists(fname4)
check5 = subject not in too_noisy or simu
if check1 * check2 * check3 * check0 * check4 * check5:
subjects.append(subject)
elif dataset_name == "ds117":
subjects = ["sub%03d" % i for i in range(1, 20)]
else:
raise ValueError("Unknown dataset %s." % dataset_name)
fname = "/home/parietal/hjanati/data/%s/info/" % dataset_name
fname += "subjects.list"
        f = open(fname, "wb")
        pickle.dump(subjects, f)
        f.close()
else:
data_path = "~/Dropbox/neuro_transport/code/"
data_path += "mtw_experiments/meg/%s/" % dataset_name
data_path = os.path.expanduser(data_path)
        f = open(data_path + "info/subjects.list", "rb")
        subjects = pickle.load(f)
        f.close()
return subjects
if __name__ == "__main__":
subjects_camcan = get_subjects_list("camcan")
subjects_ds117 = get_subjects_list("ds117")
| [
"os.path.exists",
"pickle.dump",
"pandas.read_csv",
"mne.pick_types",
"mne.pick_info",
"mne.io.read_info",
"pickle.load",
"os.path.expanduser"
] | [((136, 169), 'os.path.exists', 'os.path.exists', (['"""/home/parietal/"""'], {}), "('/home/parietal/')\n", (150, 169), False, 'import os\n'), ((805, 848), 'mne.io.read_info', 'mne.io.read_info', (['info_fname'], {'verbose': '(False)'}), '(info_fname, verbose=False)\n', (821, 848), False, 'import mne\n'), ((863, 894), 'mne.pick_types', 'mne.pick_types', (['info'], {'eeg': '(False)'}), '(info, eeg=False)\n', (877, 894), False, 'import mne\n'), ((906, 934), 'mne.pick_info', 'mne.pick_info', (['info', 'meg_ind'], {}), '(info, meg_ind)\n', (919, 934), False, 'import mne\n'), ((950, 982), 'mne.pick_types', 'mne.pick_types', (['info'], {'meg': '"""grad"""'}), "(info, meg='grad')\n", (964, 982), False, 'import mne\n'), ((4692, 4725), 'os.path.exists', 'os.path.exists', (['"""/home/parietal/"""'], {}), "('/home/parietal/')\n", (4706, 4725), False, 'import os\n'), ((283, 307), 'os.path.expanduser', 'op.expanduser', (['data_path'], {}), '(data_path)\n', (296, 307), True, 'import os.path as op\n'), ((550, 574), 'os.path.expanduser', 'op.expanduser', (['data_path'], {}), '(data_path)\n', (563, 574), True, 'import os.path as op\n'), ((6138, 6162), 'pickle.dump', 'pickle.dump', (['subjects', 'f'], {}), '(subjects, f)\n', (6149, 6162), False, 'import pickle\n'), ((6309, 6338), 'os.path.expanduser', 'os.path.expanduser', (['data_path'], {}), '(data_path)\n', (6327, 6338), False, 'import os\n'), ((6415, 6429), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6426, 6429), False, 'import pickle\n'), ((4781, 4838), 'pandas.read_csv', 'pd.read_csv', (['"""/home/parietal/hjanati/data/camcan/age.csv"""'], {}), "('/home/parietal/hjanati/data/camcan/age.csv')\n", (4792, 4838), True, 'import pandas as pd\n'), ((5425, 5447), 'os.path.exists', 'os.path.exists', (['fname0'], {}), '(fname0)\n', (5439, 5447), False, 'import os\n'), ((5473, 5495), 'os.path.exists', 'os.path.exists', (['fname1'], {}), '(fname1)\n', (5487, 5495), False, 'import os\n'), ((5521, 5543), 'os.path.exists', 'os.path.exists', (['fname2'], {}), '(fname2)\n', (5535, 5543), False, 'import os\n'), ((5569, 5591), 'os.path.exists', 'os.path.exists', (['fname3'], {}), '(fname3)\n', (5583, 5591), False, 'import os\n'), ((5617, 5639), 'os.path.exists', 'os.path.exists', (['fname4'], {}), '(fname4)\n', (5631, 5639), False, 'import os\n')] |
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--config", help="path to config")
parser.add_argument("--checkpoint", default='vox-cpk.pth.tar', help="path to checkpoint to restore")
parser.add_argument("--relative", dest="relative", action="store_true", help="use relative or absolute keypoint coordinates")
parser.add_argument("--adapt_scale", dest="adapt_scale", action="store_true", help="adapt movement scale based on convex hull of keypoints")
parser.add_argument("--no-pad", dest="no_pad", action="store_true", help="don't pad output image")
parser.add_argument("--enc_downscale", default=1, type=float, help="Downscale factor for encoder input. Improves performance with cost of quality.")
parser.add_argument("--virt-cam", type=int, default=0, help="Virtualcam device ID")
parser.add_argument("--no-stream", action="store_true", help="On Linux, force no streaming")
parser.add_argument("--verbose", action="store_true", help="Print additional information")
parser.add_argument("--hide-rect", action="store_true", default=False, help="Hide the helper rectangle in preview window")
parser.add_argument("--avatars", default="./avatars", help="path to avatars directory")
parser.add_argument("--is-worker", action="store_true", help="Whether to run this process as a remote GPU worker")
parser.add_argument("--is-client", action="store_true", help="Whether to run this process as a client")
parser.add_argument("--in-port", type=int, default=5557, help="Remote worker input port")
parser.add_argument("--out-port", type=int, default=5558, help="Remote worker output port")
parser.add_argument("--in-addr", type=str, default=None, help="Socket address for incoming messages, like example.com:5557")
parser.add_argument("--out-addr", type=str, default=None, help="Socker address for outcoming messages, like example.com:5558")
parser.add_argument("--jpg_quality", type=int, default=95, help="Jpeg copression quality for image transmission")
parser.set_defaults(relative=False)
parser.set_defaults(adapt_scale=False)
parser.set_defaults(no_pad=False)
opt = parser.parse_args()
if opt.is_client and (opt.in_addr is None or opt.out_addr is None):
raise ValueError("You have to set --in-addr and --out-addr")
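
# Example invocations (the script name and config path are placeholders;
# the addresses follow the defaults in the help strings above):
#   python demo.py --config config.yaml --checkpoint vox-cpk.pth.tar --relative --adapt_scale
#   python demo.py --is-client --in-addr example.com:5557 --out-addr example.com:5558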
| [
"argparse.ArgumentParser"
] | [((46, 62), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (60, 62), False, 'from argparse import ArgumentParser\n')] |
import csv
import os
def write_seed_hit_matrix(master_seed_hits, seed_pairs, out_path):
"""
Function to write the comma delimited file indicating the number of hits for each seed sequence pair
    :param master_seed_hits: Dict keyed by genome; each value is a row dict mapping 'genome' and each seed-sequence-pair name to its number of hits
:param seed_pairs: Dict of seed sequences that are matched into pairs
:param out_path: Filepath to the output folder
:return: None
"""
with open(os.path.join(out_path, 'contig_hit_matrix.csv'), 'w') as out_file:
header = ['genome'] + list(seed_pairs.keys())
writer = csv.DictWriter(out_file, fieldnames=header)
# Write the field names or header row in file
writer.writeheader()
# Write the remaining lines
keys = list(master_seed_hits.keys())
keys.sort()
for line in keys:
writer.writerow(master_seed_hits[line])
def write_annotation_num_matrix(master_annotation_hits, seed_pairs, out_path):
"""
    Function to write a comma-separated file giving the number of annotations in regions
    :param master_annotation_hits: Dict with genomes as keys; each value is a row dict holding the genome name and the number of annotations within each extracted region
:param seed_pairs: Dict of seed sequences that are matched into pairs
:param out_path: Filepath to the output folder
:return: None
"""
with open(os.path.join(out_path, 'annotation_num_matrix.csv'), 'w') as out_file:
header = ['genome'] + list(seed_pairs.keys())
writer = csv.DictWriter(out_file, fieldnames=header)
# Write the field names or header row in file
writer.writeheader()
# Write the remaining lines
keys = list(master_annotation_hits.keys())
keys.sort()
for line in keys:
writer.writerow(master_annotation_hits[line])
def write_seed_hit_evidence(master_seed_evidence, seed_pairs, out_path):
"""
    Function to write a comma-separated file giving the evidence level for each region identified
    :param master_seed_evidence: Dict with genomes as keys; each value is a row dict holding the genome name and the evidence level for each seed sequence pair
:param seed_pairs: Dict of seed sequences that are matched into pairs
:param out_path: Filepath to the output folder
:return: None
"""
with open(os.path.join(out_path, 'master_seed_evidence.csv'), 'w') as out_file:
header = ['genome'] + list(seed_pairs.keys())
writer = csv.DictWriter(out_file, fieldnames=header)
# Write the field names or header row in file
writer.writeheader()
# Write the remaining lines
keys = list(master_seed_evidence.keys())
keys.sort()
for line in keys:
writer.writerow(master_seed_evidence[line])
def write_inter_seed_dist(master_inter_seed_dist, seed_pairs, out_path):
"""
    Function to write a comma-separated file giving the number of nucleotides in regions identified
    :param master_inter_seed_dist: Dict with genomes as keys; each value is a row dict holding the genome name and the inter-seed distance for each seed sequence pair
:param seed_pairs: Dict of seed sequences that are matched into pairs
:param out_path: Filepath to the output folder
:return: None
"""
with open(os.path.join(out_path, 'inter_seed_distance.csv'), 'w') as out_file:
header = ['genome'] + list(seed_pairs.keys())
writer = csv.DictWriter(out_file, fieldnames=header)
# Write the field names or header row in file
writer.writeheader()
# Write the remaining lines
keys = list(master_inter_seed_dist.keys())
keys.sort()
for line in keys:
writer.writerow(master_inter_seed_dist[line])
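
# Minimal usage sketch (hypothetical data, written to the current directory; not
# part of the original module). Each value in the master dict is a row dict that
# must provide the 'genome' field plus one field per seed-pair key, matching the
# csv.DictWriter header built above.
if __name__ == '__main__':
    example_seed_pairs = {'seedA~seedB': ('seedA', 'seedB')}
    example_hits = {'genome_1': {'genome': 'genome_1', 'seedA~seedB': 2},
                    'genome_2': {'genome': 'genome_2', 'seedA~seedB': 0}}
    write_seed_hit_matrix(example_hits, example_seed_pairs, '.')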
| [
"csv.DictWriter",
"os.path.join"
] | [((688, 731), 'csv.DictWriter', 'csv.DictWriter', (['out_file'], {'fieldnames': 'header'}), '(out_file, fieldnames=header)\n', (702, 731), False, 'import csv\n'), ((1698, 1741), 'csv.DictWriter', 'csv.DictWriter', (['out_file'], {'fieldnames': 'header'}), '(out_file, fieldnames=header)\n', (1712, 1741), False, 'import csv\n'), ((2564, 2607), 'csv.DictWriter', 'csv.DictWriter', (['out_file'], {'fieldnames': 'header'}), '(out_file, fieldnames=header)\n', (2578, 2607), False, 'import csv\n'), ((3429, 3472), 'csv.DictWriter', 'csv.DictWriter', (['out_file'], {'fieldnames': 'header'}), '(out_file, fieldnames=header)\n', (3443, 3472), False, 'import csv\n'), ((550, 597), 'os.path.join', 'os.path.join', (['out_path', '"""contig_hit_matrix.csv"""'], {}), "(out_path, 'contig_hit_matrix.csv')\n", (562, 597), False, 'import os\n'), ((1556, 1607), 'os.path.join', 'os.path.join', (['out_path', '"""annotation_num_matrix.csv"""'], {}), "(out_path, 'annotation_num_matrix.csv')\n", (1568, 1607), False, 'import os\n'), ((2423, 2473), 'os.path.join', 'os.path.join', (['out_path', '"""master_seed_evidence.csv"""'], {}), "(out_path, 'master_seed_evidence.csv')\n", (2435, 2473), False, 'import os\n'), ((3289, 3338), 'os.path.join', 'os.path.join', (['out_path', '"""inter_seed_distance.csv"""'], {}), "(out_path, 'inter_seed_distance.csv')\n", (3301, 3338), False, 'import os\n')] |
from tkinter import ttk
from tkinter import filedialog
from tkinter import *
import sqlite3  # used for the SQLite database
import xml.etree.ElementTree as ET
from sympy import *
import os
import datetime  # needed for "use test settings"
from datetime import datetime  # needed for "delete all entries?" (NOTE: this shadows the module imported above)
import pathlib
import shutil  # for copying and zipping files
from PIL import ImageTk, Image  # for previewing selected images
import pandas as pd
from pandas.core.reshape.util import cartesian_product
import numpy as np
import re
from functools import partial
import time
from tkinter import messagebox
from tkscrolledframe import ScrolledFrame  # scrollable window (scrollbars)
### Own files / modules
from Test_Generator_Module import test_generator_modul_datenbanken_anzeigen
from Test_Generator_Module import test_generator_modul_datenbanken_erstellen
from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung
from Test_Generator_Module import test_generator_modul_ilias_test_struktur
from Test_Generator_Module import test_generator_modul_ilias_import_test_datei
#from Test_Generator_Module import test_generator_modul_zeigerdiagramme
class Formelfrage:
############## SET IMAGE VARIABLES
############## DEFINE FORMELFRAGE PATHS
############## FRAMES
# add_image_to_description_and_create_labels
# add_image_to_description_and_delete_labels
    ############## PROCESSING TIME
# selected_hours
# selected_minutes
# selecteds_seconds
    ############## HEADINGS / LABELS FOR THE ENTRY-FIELD MATRIX
    ############## ENTRY FIELDS / ENTRIES FOR THE ENTRY-FIELD MATRIX
# answer_selected
    ############## UNIT SELECTION FOR VARIABLES ---- CURRENTLY INACTIVE
    ############## HEADINGS / LABELS FOR THE ENTRY-FIELD MATRIX
    ############## ENTRY FIELDS / ENTRIES FOR THE ENTRY-FIELD MATRIX
    ############## UNITS FOR RESULTS CURRENTLY DISABLED
# result_selected
#____ INIT end
# ff_variable_show_or_remove
# ff_result_show_or_remove
# unit_table
# ff_replace_character_in_xml_file
# ff_calculate_value_range_function_in_GUI
# ff_calculate_value_range_from_formula_in_GUI
    ############## DATABASE FUNCTIONS
# ff_save_id_to_db
# ff_load_id_from_db
# ff_edit_id_from_db
# ff_delete_id_from_db
# ff_load_id_from_db
# ff_clear_GUI
def __init__(self, app, formelfrage_tab, project_root_path):
self.formelfrage_tab = formelfrage_tab
        ############## SET QUESTION_TYPE SPECIFIC NAMES FOR DATABASE AND WORKBOOK/SHEET
        # name of the question type
self.ff_question_type_name = "formelfrage"
        # names for the database and its table
self.ff_database = "ilias_formelfrage_db.db"
self.ff_database_table = "formelfrage_table"
        # names for the spreadsheet export file and its worksheet
self.ff_xlsx_workbook_name = "Formelfrage_DB_export_file"
self.ff_xlsx_worksheet_name = "Formelfrage - Database"
############## SET IMAGE VARIABLES
        # these variables must be set at program start so they can be passed on to other functions
self.ff_description_img_name_1 = ""
self.ff_description_img_name_2 = ""
self.ff_description_img_name_3 = ""
self.ff_description_img_data_1 = ""
self.ff_description_img_data_2 = ""
self.ff_description_img_data_3 = ""
self.ff_description_img_path_1 = ""
self.ff_description_img_path_2 = ""
self.ff_description_img_path_3 = ""
############## DEFINE FORMELFRAGE PATHS
        # paths of the project and of the FF module
self.project_root_path = project_root_path
self.formelfrage_files_path = os.path.normpath(os.path.join(self.project_root_path, "ILIAS-Formelfrage"))
self.formelfrage_files_path_pool_output = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_ilias_pool_abgabe"))
        # path to the database
self.database_formelfrage_path = os.path.normpath(os.path.join(self.project_root_path, "Test_Generator_Datenbanken", self.ff_database))
        # paths to the ILIAS test templates
self.formelfrage_test_qti_file_path_template = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_test_qti_und_tst_dateien_vorlage", "ilias_test_vorlage__qti__.xml"))
self.formelfrage_test_tst_file_path_template = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_test_qti_und_tst_dateien_vorlage", "ilias_test_vorlage__tst__.xml"))
        # paths to the ILIAS test files (for uploading to ILIAS)
self.formelfrage_test_qti_file_path_output = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_ilias_test_abgabe", "1604407426__0__tst_2040314", "1604407426__0__qti_2040314.xml"))
self.formelfrage_test_tst_file_path_output = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_ilias_test_abgabe", "1604407426__0__tst_2040314", "1604407426__0__tst_2040314.xml"))
self.formelfrage_test_img_file_path = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_ilias_test_abgabe", "1604407426__0__tst_2040314", "objects"))
        # paths to the ILIAS pool templates
self.formelfrage_pool_qti_file_path_template = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_pool_qti_und_qpl_dateien_vorlage", "ilias_pool_vorlage__qti__.xml"))
self.formelfrage_pool_qpl_file_path_template = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_pool_qti_und_qpl_dateien_vorlage", "ilias_pool_vorlage__qpl__.xml"))
        # paths for the ILIAS pool files (for uploading to ILIAS)
        # The paths for the qti.xml and qpl.xml are only determined at runtime,
        # so they are declared in "class Create_Formelfrage_Pool".
self.formelfrage_pool_directory_output = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_ilias_pool_abgabe"))
        ###################### CREATE DATABASE ENTRIES AND INDEX DICT ###################
        # build a dictionary from two lists
self.ff_db_find_entries = []
self.ff_db_find_indexes = []
self.ff_db_column_names_list = []
self.ff_collection_of_question_titles = []
connect = sqlite3.connect(self.database_formelfrage_path)
cursor = connect.execute('select * from ' + self.ff_database_table)
self.ff_db_column_names_list = list(map(lambda x: x[0], cursor.description))
self.db_column_names_string = ', :'.join(self.ff_db_column_names_list)
self.db_column_names_string = ":" + self.db_column_names_string
for i in range(len(self.ff_db_column_names_list)):
self.ff_db_find_indexes.append(i)
"""
# Durch list(map(lambdax: x[0])) werden die Spaltennamen aus der DB ausgelesen
cursor = conn.execute('select * from ' + self.ff_database_table)
db_column_names_list = list(map(lambda x: x[0], cursor.description))
db_column_names_string = ', :'.join(db_column_names_list)
db_column_names_string = ":" + db_column_names_string
"""
self.ff_db_entry_to_index_dict = dict(zip((self.ff_db_column_names_list), (self.ff_db_find_indexes)))
connect.commit()
connect.close()
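        # Illustrative shape of the mapping built above (the column names here are
        # hypothetical; the real ones come from the formelfrage_table schema):
        #   ff_db_column_names_list   -> ['id', 'question_author', 'question_title', ...]
        #   ff_db_entry_to_index_dict -> {'id': 0, 'question_author': 1, 'question_title': 2, ...}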
############## FRAMES
self.ff_frame_ilias_test_title = LabelFrame(self.formelfrage_tab, text="Testname & Autor", padx=5, pady=5)
self.ff_frame_ilias_test_title.grid(row=0, column=0, padx=10, pady=10, sticky="NW")
self.ff_frame = LabelFrame(self.formelfrage_tab, text="Formelfrage", padx=5, pady=5)
self.ff_frame.grid(row=1, column=0, padx=10, pady=10, sticky="NW")
self.ff_frame_question_attributes = LabelFrame(self.formelfrage_tab, text="Fragen Attribute", padx=5, pady=5)
self.ff_frame_question_attributes.grid(row=2, column=0, padx=10, pady=10, sticky="NE")
self.ff_frame_database = LabelFrame(self.formelfrage_tab, text="Formelfrage-Datenbank", padx=5, pady=5)
self.ff_frame_database.grid(row=2, column=0, padx=10, pady=10, sticky="NW")
self.ff_frame_create_formelfrage_test = LabelFrame(self.formelfrage_tab, text="FF-Test erstellen", padx=5, pady=5)
self.ff_frame_create_formelfrage_test.grid(row=2, column=0, padx=10, pady=120, sticky="NE")
self.ff_frame_taxonomy_settings = LabelFrame(self.formelfrage_tab, text="Taxonomie Einstellungen", padx=5, pady=5)
self.ff_frame_taxonomy_settings.grid(row=0, column=1, padx=10, pady=10, sticky="NW")
self.ff_frame_question_description_functions = LabelFrame(self.formelfrage_tab, text="Fragentext Funktionen", padx=5, pady=5)
self.ff_frame_question_description_functions.grid(row=1, column=1, padx=10, pady=10, sticky="NW")
self.ff_frame_excel_import_export = LabelFrame(self.formelfrage_tab, text="Excel Import/Export", padx=5, pady=5)
self.ff_frame_excel_import_export.grid(row=2, column=1, padx=10, pady=10, sticky="NW")
self.ff_frame_description_picture = LabelFrame(self.formelfrage_tab, text="Fragen-Text Bild", padx=5, pady=5)
self.ff_frame_description_picture.grid(row=1, column=2, padx=10, pady=10, sticky="NW")
self.ff_frame_vector_diagram = LabelFrame(self.formelfrage_tab, text="Zeigerdiagramme", padx=5, pady=5)
#self.ff_frame_vector_diagram.grid(row=2, column=1, padx=10, pady=200, sticky="NW")
###################### "Testname & Autor" - FRAME -------- LABELS / ENTRYS / BUTTONS ################
self.ff_ilias_test_title_label = Label(self.ff_frame_ilias_test_title, text="Name des Tests")
self.ff_ilias_test_title_label.grid(row=0, column=0, sticky=W)
self.ff_ilias_test_title_entry = Entry(self.ff_frame_ilias_test_title, width=60)
self.ff_ilias_test_title_entry.grid(row=0, column=1, sticky=W, padx=30)
self.ff_ilias_test_autor_label = Label(self.ff_frame_ilias_test_title, text="Autor")
self.ff_ilias_test_autor_label.grid(row=1, column=0, sticky=W)
self.ff_ilias_test_autor_entry = Entry(self.ff_frame_ilias_test_title, width=60)
self.ff_ilias_test_autor_entry.grid(row=1, column=1, sticky=W, padx=30)
###################### TEST SETTINGS
self.show_test_settings_formula_tab = Button(self.formelfrage_tab, text="Test-Einstellungen",command=lambda: GUI_settings_window.__init__(self))
#self.show_test_settings_formula_tab.grid(row=0, column=0, pady=20, sticky=NE)
######################################
###################### "Fragen-Text Bild" - FRAME -------- LABELS / ENTRYS / BUTTONS ################
# Hinzufügen Bild 1
self.ff_var_use_image_1 = IntVar()
self.ff_check_use_image_1_in_description = Checkbutton(self.ff_frame_question_description_functions, text="Bild 1 hochladen?", variable=self.ff_var_use_image_1, onvalue=1, offvalue=0)
self.ff_check_use_image_1_in_description.deselect()
self.ff_check_use_image_1_in_description.grid(row=5, column=0, sticky=W, padx=90, pady=(10, 0))
        # add image 2
self.ff_var_use_image_2 = IntVar()
self.ff_check_use_image_2_in_description = Checkbutton(self.ff_frame_question_description_functions, text="Bild 2 hochladen?", variable=self.ff_var_use_image_2, onvalue=1, offvalue=0)
self.ff_check_use_image_2_in_description.deselect()
self.ff_check_use_image_2_in_description.grid(row=6, column=0, sticky=W, padx=90)
        # add image 3
self.ff_var_use_image_3 = IntVar()
self.ff_check_use_image_3_in_description = Checkbutton(self.ff_frame_question_description_functions, text="Bild 3 hochladen?", variable=self.ff_var_use_image_3, onvalue=1, offvalue=0)
self.ff_check_use_image_3_in_description.deselect()
self.ff_check_use_image_3_in_description.grid(row=7, column=0, sticky=W, padx=90)
        # buttons - add image & delete image
self.ff_add_img_to_description_btn = Button(self.ff_frame_question_description_functions, text="Bild hinzufügen", command=lambda: ff_add_image_to_description_and_create_labels())
self.ff_add_img_to_description_btn.grid(row=8, column=0, sticky=W, padx = 10, pady=(20,0))
        # add image to the question text
def ff_add_image_to_description_and_create_labels():
            # create labels
self.ff_question_description_img_1_filename_label = Label(self.ff_frame_description_picture, text=self.ff_description_img_name_1)
self.ff_question_description_img_2_filename_label = Label(self.ff_frame_description_picture, text=self.ff_description_img_name_2)
self.ff_question_description_img_3_filename_label = Label(self.ff_frame_description_picture, text=self.ff_description_img_name_3)
self.ff_description_img_name_1, self.ff_description_img_name_2, self.ff_description_img_name_3, self.ff_description_img_path_1, self.ff_description_img_path_2, self.ff_description_img_path_3, self.ff_question_description_img_1_filename_label, self.ff_question_description_img_2_filename_label, self.ff_question_description_img_3_filename_label = test_generator_modul_ilias_test_struktur.Additional_Funtions.add_image_to_description(
self,
self.project_root_path,
self.ff_var_use_image_1.get(),
self.ff_var_use_image_2.get(),
self.ff_var_use_image_3.get(),
self.ff_frame_description_picture,
self.ff_description_img_name_1,
self.ff_description_img_name_2,
self.ff_description_img_name_3,
self.ff_description_img_path_1,
self.ff_description_img_path_2,
self.ff_description_img_path_3,
)
self.ff_remove_img_from_description_btn = Button(self.ff_frame_question_description_functions, text="Bild entfernen", command=lambda: ff_add_image_to_description_and_delete_labels())
self.ff_remove_img_from_description_btn.grid(row=8, column=0, sticky=W, padx=120, pady=(20,0))
        # remove image from the question text
def ff_add_image_to_description_and_delete_labels():
self.ff_description_img_name_1, self.ff_description_img_name_2, self.ff_description_img_name_3 = test_generator_modul_ilias_test_struktur.Additional_Funtions.delete_image_from_description(
self, self.ff_var_use_image_1.get(),
self.ff_var_use_image_2.get(),
self.ff_var_use_image_3.get(),
self.ff_question_description_img_1_filename_label,
self.ff_question_description_img_2_filename_label,
self.ff_question_description_img_3_filename_label,
self.ff_description_img_name_1,
self.ff_description_img_name_2,
self.ff_description_img_name_3,
)
###################### "Taxonomie Einstellungen" - FRAME -------- LABELS / ENTRYS / BUTTONS ################
self.ff_taxonomy_settings_btn = Button(self.ff_frame_taxonomy_settings, text="Taxonomie-Einstellungen",command=lambda: test_generator_modul_taxonomie_und_textformatierung.Taxonomie.__init__(self))
self.ff_taxonomy_settings_btn.grid(row=3, column=0, columnspan = 2, padx=10, sticky="W")
###################### "Fragen Attribute" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
self.ff_question_difficulty_label = Label(self.ff_frame_question_attributes, text="Schwierigkeit")
self.ff_question_difficulty_label.grid(row=0, column=0, pady=5, padx=5, sticky=W)
self.ff_question_difficulty_entry = Entry(self.ff_frame_question_attributes, width=15)
self.ff_question_difficulty_entry.grid(row=0, column=1, pady=5, padx=5, sticky=W)
self.ff_question_category_label = Label(self.ff_frame_question_attributes, text="Fragenkategorie")
self.ff_question_category_label.grid(row=1, column=0, pady=5, padx=5, sticky=W)
self.ff_question_category_entry = Entry(self.ff_frame_question_attributes, width=15)
self.ff_question_category_entry.grid(row=1, column=1, pady=5, padx=5, sticky=W)
self.ff_question_type_label = Label(self.ff_frame_question_attributes, text="Fragen-Typ")
self.ff_question_type_label.grid(row=0, column=2, pady=5, padx=5, sticky=W)
self.ff_question_type_entry = Entry(self.ff_frame_question_attributes, width=15)
self.ff_question_type_entry.grid(row=0, column=3, pady=5, padx=5, sticky=W)
self.ff_question_type_entry.insert(0, "Formelfrage")
self.ff_question_pool_tag_label = Label(self.ff_frame_question_attributes, text="Pool-Tag")
self.ff_question_pool_tag_label.grid(row=1, column=2, pady=5, padx=5, sticky=W)
self.ff_question_pool_tag_entry = Entry(self.ff_frame_question_attributes, width=15)
self.ff_question_pool_tag_entry.grid(row=1, column=3, pady=5, padx=5, sticky=W)
###################### "FF-Test erstellen" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
# Button "Formelfrage-Test erstellen"
self.create_formelfrage_test_btn = Button(self.ff_frame_create_formelfrage_test, text="FF-Test erstellen", command=lambda: Create_Formelfrage_Test.__init__(self, self.ff_db_entry_to_index_dict))
self.create_formelfrage_test_btn.grid(row=0, column=0, sticky=W)
self.create_formelfrage_test_entry = Entry(self.ff_frame_create_formelfrage_test, width=15)
self.create_formelfrage_test_entry.grid(row=0, column=1, sticky=W, padx=0)
# Checkbox "Test-Einstellungen übernehmen?"
self.create_test_settings_label = Label(self.ff_frame_create_formelfrage_test, text="Test-Einstellungen übernehmen?")
self.create_test_settings_label.grid(row=1, column=0, pady=5, padx=5, sticky=W)
self.var_test_settings = IntVar()
self.check_test_settings = Checkbutton(self.ff_frame_create_formelfrage_test, text="", variable=self.var_test_settings, onvalue=1, offvalue=0)
self.check_test_settings.deselect()
self.check_test_settings.grid(row=1, column=1, sticky=W)
# Checkbox "Latex für Fragentext nutzen?"
self.ff_use_latex_on_text_label = Label(self.ff_frame_create_formelfrage_test, text="Latex für Fragentext nutzen?")
self.ff_use_latex_on_text_label.grid(row=2, column=0, sticky=W, padx=5)
self.ff_var_use_latex_on_text_check = IntVar()
self.ff_use_latex_on_text_check = Checkbutton(self.ff_frame_create_formelfrage_test, text="", variable=self.ff_var_use_latex_on_text_check, onvalue=1, offvalue=0)
self.ff_use_latex_on_text_check.deselect()
self.ff_use_latex_on_text_check.grid(row=2, column=1, sticky=W)
# Checkbox "Alle Einträge aus der DB erzeugen?"
self.ff_create_question_pool_all_label = Label(self.ff_frame_create_formelfrage_test, text="Alle Einträge aus der DB erzeugen?")
self.ff_create_question_pool_all_label.grid(row=4, column=0, pady=(10,0), padx=5, sticky=W)
self.ff_var_create_question_pool_all_check = IntVar()
self.ff_create_question_pool_all = Checkbutton(self.ff_frame_create_formelfrage_test, text="", variable=self.ff_var_create_question_pool_all_check, onvalue=1, offvalue=0)
#self.ff_var_create_question_pool_all_check.set(0)
self.ff_create_question_pool_all.grid(row=4, column=1, sticky=W, pady=(10,0))
# Checkbox "Mehrere Fragenpools Taxonomie getrennt erstellen?"
self.ff_create_multiple_question_pools_from_tax_label = Label(self.ff_frame_create_formelfrage_test, text="Mehrere Fragenpools (Taxonomie getrennt) erstellen?")
self.ff_create_multiple_question_pools_from_tax_label.grid(row=5, column=0, pady=(10,0), padx=5, sticky=W)
self.ff_var_create_multiple_question_pools_from_tax_check = IntVar()
self.ff_create_multiple_question_pools_from_tax = Checkbutton(self.ff_frame_create_formelfrage_test, text="", variable=self.ff_var_create_multiple_question_pools_from_tax_check, onvalue=1, offvalue=0)
#self.ff_var_create_question_pool_all_check.set(0)
self.ff_create_multiple_question_pools_from_tax.grid(row=5, column=1, sticky=W, pady=(10,0))
# Checkbox "Taxonomie für getrennte Pools behalten?"
self.ff_remove_pool_tags_for_tax_label = Label(self.ff_frame_create_formelfrage_test, text=" ---> Taxonomie für getrennte Pools \"löschen\"?")
self.ff_remove_pool_tags_for_tax_label.grid(row=6, column=0, pady=(0,0), padx=5, sticky=W)
self.ff_var_remove_pool_tags_for_tax_check = IntVar()
self.ff_remove_pool_tags_for_tax = Checkbutton(self.ff_frame_create_formelfrage_test, text="", variable=self.ff_var_remove_pool_tags_for_tax_check, onvalue=1, offvalue=0)
#self.ff_var_create_question_pool_all_check.set(0)
self.ff_remove_pool_tags_for_tax.grid(row=6, column=1, sticky=W, pady=(0,0))
# Checkbox "Wertebreiche für Fragenpool berechnen?"
self.ff_calculate_value_range_from_db_entries_label = Label(self.ff_frame_create_formelfrage_test, text="Wertebreiche für Fragenpool berechnen?")
self.ff_calculate_value_range_from_db_entries_label.grid(row=7, column=0, pady=(10,0), padx=5, sticky=W)
self.ff_var_calculate_value_range_from_db_entries_check = IntVar()
self.ff_calculate_value_range_from_db_entries = Checkbutton(self.ff_frame_create_formelfrage_test, text="", variable=self.ff_var_calculate_value_range_from_db_entries_check, onvalue=1, offvalue=0)
        self.ff_var_calculate_value_range_from_db_entries_check.set(0)
self.ff_calculate_value_range_from_db_entries.grid(row=7, column=1, sticky=W, pady=(10,0))
# Button "Formelfrage-Fragenpool erstellen"
self.create_formelfrage_pool_btn = Button(self.ff_frame_create_formelfrage_test, text="FF-Pool erstellen", command=lambda: Create_Formelfrage_Pool.__init__(self, self.ff_db_entry_to_index_dict, self.ff_var_create_question_pool_all_check.get(), self.ff_var_create_multiple_question_pools_from_tax_check.get(), self.ff_var_calculate_value_range_from_db_entries_check.get()))
self.create_formelfrage_pool_btn.grid(row=3, column=0, sticky=W, pady=(30,0))
self.create_formelfrage_pool_entry = Entry(self.ff_frame_create_formelfrage_test, width=15)
self.create_formelfrage_pool_entry.grid(row=3, column=1, sticky=W, padx=0, pady=(30,0))
###################### "Formelfrage-Datenbank" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
self.ff_database_show_db_formelfrage_btn = Button(self.ff_frame_database, text="FF - Datenbank anzeigen", command=lambda: test_generator_modul_datenbanken_anzeigen.MainGUI.__init__(self, self.database_formelfrage_path, self.ff_database_table))
self.ff_database_show_db_formelfrage_btn.grid(row=0, column=0, sticky=W, pady=5)
self.ff_database_save_id_to_db_formelfrage_btn = Button(self.ff_frame_database, text="Speichern unter neuer ID", command=lambda: Formelfrage.ff_save_id_to_db(self))
self.ff_database_save_id_to_db_formelfrage_btn.grid(row=1, column=0, sticky=W, pady=5)
self.ff_database_delete_id_from_db_btn = Button(self.ff_frame_database, text="ID Löschen", command=lambda: Formelfrage.ff_delete_id_from_db(self))
self.ff_database_delete_id_from_db_btn.grid(row=6, column=0, sticky=W, pady=5)
self.ff_delete_box = Entry(self.ff_frame_database, width=10)
self.ff_delete_box.grid(row=6, column=0, padx=80, sticky=W)
self.ff_database_new_question_btn = Button(self.ff_frame_database, text="GUI Einträge leeren", command=lambda: Formelfrage.ff_clear_GUI(self))
self.ff_database_new_question_btn.grid(row=8, column=0, sticky=W, pady=5)
self.ff_database_edit_btn = Button(self.ff_frame_database, text="Aktuellen Eintrag editieren", command=lambda: Formelfrage.ff_edit_id_from_db(self))
self.ff_database_edit_btn.grid(row=3, column=0, sticky=W, pady=5)
self.ff_database_load_id_btn = Button(self.ff_frame_database, text="ID Laden", command=lambda: Formelfrage.ff_load_id_from_db(self, self.ff_db_entry_to_index_dict))
self.ff_database_load_id_btn.grid(row=4, column=0, sticky=W, pady=(15,0))
self.ff_load_box = Entry(self.ff_frame_database, width=10)
self.ff_load_box.grid(row=4, column=0, sticky=W, padx=80, pady=(15,0))
# Checkbox - "Fragentext mit Highlighting?"
self.ff_highlight_question_text_label = Label(self.ff_frame_database, text="Fragentext mit Highlighting?")
self.ff_highlight_question_text_label.grid(row=5, column=0, pady=5, padx=5)
self.ff_var_highlight_question_text = IntVar()
self.ff_check_highlight_question_text = Checkbutton(self.ff_frame_database, text="", variable=self.ff_var_highlight_question_text, onvalue=1, offvalue=0)
self.ff_check_highlight_question_text.deselect()
self.ff_check_highlight_question_text.grid(row=5, column=0, sticky=E)
# Checkbox - "Alle DB Einträge löschen?"
self.ff_delete_all_label = Label(self.ff_frame_database, text="Alle DB Einträge löschen?")
self.ff_delete_all_label.grid(row=7, column=0, pady=5, padx=5)
self.ff_var_delete_all = IntVar()
self.ff_check_delete_all = Checkbutton(self.ff_frame_database, text="", variable=self.ff_var_delete_all, onvalue=1, offvalue=0)
self.ff_check_delete_all.deselect()
self.ff_check_delete_all.grid(row=7, column=0, sticky=E)
###################### "Excel Import/Export" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
# excel_import_btn
self.ff_excel_import_to_db_formelfrage_btn = Button(self.ff_frame_excel_import_export, text="Excel-Datei importieren", command=lambda: test_generator_modul_datenbanken_erstellen.Import_Export_Database.excel_import_to_db(self, self.ff_question_type_name, self.ff_db_entry_to_index_dict))
self.ff_excel_import_to_db_formelfrage_btn.grid(row=0, column=1, sticky=W, pady=5, padx=10)
# excel_export_btn
self.ff_excel_export_to_xlsx_formelfrage_btn = Button(self.ff_frame_excel_import_export, text="Datenbank exportieren",command=lambda: test_generator_modul_datenbanken_erstellen.Import_Export_Database.excel_export_to_xlsx(self, self.project_root_path, self.ff_db_entry_to_index_dict, self.database_formelfrage_path, self.ff_database, self.ff_database_table, self.ff_xlsx_workbook_name, self.ff_xlsx_worksheet_name))
self.ff_excel_export_to_xlsx_formelfrage_btn.grid(row=1, column=1, sticky=W, pady=5, padx=10)
# ILIAS_testfile_import
self.ff_import_ilias_testfile_btn = Button(self.ff_frame_excel_import_export, text="ILIAS-Datei importieren",command=lambda: test_generator_modul_ilias_import_test_datei.Import_ILIAS_Datei_in_DB.__init__(self, self.project_root_path))
self.ff_import_ilias_testfile_btn.grid(row=2, column=1, sticky=W, pady=(20,0), padx=10)
###################### "Fragentext Funktionen" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
self.add_latex_term_btn = Button(self.ff_frame_question_description_functions, text="Text \"Latex\"", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_latex(self, self.ff_question_description_main_entry))
self.add_latex_term_btn.grid(row=1, column=0, padx=10, sticky="W")
self.set_text_sub_btn = Button(self.ff_frame_question_description_functions, text="Text \"Tiefgestellt\"", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_sub(self, self.ff_question_description_main_entry))
self.set_text_sub_btn .grid(row=2, column=0, padx=10, pady=(10, 0), sticky="W")
self.set_text_sup_btn = Button(self.ff_frame_question_description_functions, text="Text \"Hochgestellt\"", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_sup(self, self.ff_question_description_main_entry))
self.set_text_sup_btn.grid(row=3, column=0, padx=10, sticky="W")
self.set_text_italic_btn = Button(self.ff_frame_question_description_functions, text="Text \"Kursiv\"", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_italic(self, self.ff_question_description_main_entry))
self.set_text_italic_btn.grid(row=4, column=0, padx=10, sticky="W")
self.set_postion_for_picture_1_btn = Button(self.ff_frame_question_description_functions, text="Pos. Bild 1", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_1(self, self.ff_question_description_main_entry))
self.set_postion_for_picture_1_btn.grid(row=5, column=0, padx=10, pady=(10, 0), sticky="W")
self.set_postion_for_picture_2_btn = Button(self.ff_frame_question_description_functions, text="Pos. Bild 2", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_2(self, self.ff_question_description_main_entry))
self.set_postion_for_picture_2_btn.grid(row=6, column=0, padx=10, sticky="W")
self.set_postion_for_picture_3_btn = Button(self.ff_frame_question_description_functions, text="Pos. Bild 3", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_3(self, self.ff_question_description_main_entry))
self.set_postion_for_picture_3_btn.grid(row=7, column=0, padx=10, sticky="W")
###################### "Zeigerdiagramme" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
self.ff_vector_diagram_type =["Serienschaltung: RL", "Serienschaltung: RC", "Serienschaltung: RLC"]
self.ff_vector_diagram_type_box = ttk.Combobox(self.ff_frame_vector_diagram, value=self.ff_vector_diagram_type, width=20)
self.ff_vector_diagram_type_box.grid(row=0, column=0, sticky=W, pady=10)
self.ff_vector_diagram_U_label = Label(self.ff_frame_vector_diagram, text='Wert für U:')
self.ff_vector_diagram_U_label.grid(row=1, column=0, sticky=W)
self.ff_vector_diagram_U_entry = Entry(self.ff_frame_vector_diagram, width=10)
self.ff_vector_diagram_U_entry.grid(row=1, column=0, sticky=W, padx=70)
self.ff_vector_diagram_R_label = Label(self.ff_frame_vector_diagram, text='Wert für R:')
self.ff_vector_diagram_R_label.grid(row=2, column=0, sticky=W)
self.ff_vector_diagram_R_entry = Entry(self.ff_frame_vector_diagram, width=10)
self.ff_vector_diagram_R_entry.grid(row=2, column=0, sticky=W, padx=70)
self.ff_vector_diagram_L_label = Label(self.ff_frame_vector_diagram, text='Wert für L:')
self.ff_vector_diagram_L_label.grid(row=3, column=0, sticky=W)
self.ff_vector_diagram_L_entry = Entry(self.ff_frame_vector_diagram, width=10)
self.ff_vector_diagram_L_entry.grid(row=3, column=0, sticky=W, padx=70)
self.ff_vector_diagram_C_label = Label(self.ff_frame_vector_diagram, text='Wert für C:')
self.ff_vector_diagram_C_label.grid(row=4, column=0, sticky=W)
self.ff_vector_diagram_C_entry = Entry(self.ff_frame_vector_diagram, width=10)
self.ff_vector_diagram_C_entry.grid(row=4, column=0, sticky=W, padx=70)
self.ff_vector_diagram_freq_label = Label(self.ff_frame_vector_diagram, text='Wert für f:')
self.ff_vector_diagram_freq_label.grid(row=5, column=0, sticky=W)
self.ff_vector_diagram_freq_entry = Entry(self.ff_frame_vector_diagram, width=10)
self.ff_vector_diagram_freq_entry.grid(row=5, column=0, sticky=W, padx=70)
        # create the current/voltage diagram
self.ff_var_create_voltage_current_vector_diagram = IntVar()
self.ff_check_create_voltage_vector_diagram = Checkbutton(self.ff_frame_vector_diagram, text="Strom-/Spannungsdiagramm", variable=self.ff_var_create_voltage_current_vector_diagram, onvalue=1, offvalue=0)
self.ff_check_create_voltage_vector_diagram.deselect()
self.ff_check_create_voltage_vector_diagram.grid(row=1, column=1, sticky=W)
        # impedance diagram
self.ff_var_create_impedance_vector_diagram = IntVar()
self.ff_check_create_impedance_vector_diagram = Checkbutton(self.ff_frame_vector_diagram, text="Impedanz-Diagramm ", variable=self.ff_var_create_impedance_vector_diagram, onvalue=1, offvalue=0)
self.ff_check_create_impedance_vector_diagram.deselect()
self.ff_check_create_impedance_vector_diagram.grid(row=2, column=1, sticky=W)
        # admittance diagram
self.ff_var_create_admittance_vector_diagram = IntVar()
self.ff_check_create_admittance_vector_diagram = Checkbutton(self.ff_frame_vector_diagram, text="Admittanz-Diagramm ", variable=self.ff_var_create_admittance_vector_diagram, onvalue=1, offvalue=0)
self.ff_check_create_admittance_vector_diagram.deselect()
self.ff_check_create_admittance_vector_diagram.grid(row=3, column=1, sticky=W)
        # power diagram
self.ff_var_create_power_vector_diagram = IntVar()
self.ff_check_create_power_vector_diagram = Checkbutton(self.ff_frame_vector_diagram, text="Leistungsdiagramm ", variable=self.ff_var_create_power_vector_diagram, onvalue=1, offvalue=0)
self.ff_check_create_power_vector_diagram.deselect()
self.ff_check_create_power_vector_diagram.grid(row=4, column=1, sticky=W)
self.ff_vector_diagram_btn = Button(self.ff_frame_vector_diagram, text="Zeigerdiagramm erzeugen", command=lambda: test_generator_modul_zeigerdiagramme.Zeigerdiagramme.__init__( self, self.ff_vector_diagram_type_box.get(),
self.ff_var_create_voltage_current_vector_diagram.get(),
self.ff_var_create_impedance_vector_diagram.get(),
self.ff_var_create_admittance_vector_diagram.get(),
self.ff_var_create_power_vector_diagram.get(),
self.ff_vector_diagram_U_entry.get(),
self.ff_vector_diagram_R_entry.get(),
self.ff_vector_diagram_L_entry.get(),
self.ff_vector_diagram_C_entry.get(),
self.ff_vector_diagram_freq_entry.get()
))
self.ff_vector_diagram_btn.grid(row=10, column=0, padx=10, pady=(10, 0), sticky="W")
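        # NOTE: the command above references test_generator_modul_zeigerdiagramme,
        # whose import is commented out at the top of this file; since the
        # surrounding frame is never gridded the button is unreachable, but the
        # import must be restored before re-enabling it.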
###################### "Formelfrage" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
self.ff_question_author_label = Label(self.ff_frame, text="Fragen-Autor")
self.ff_question_author_label.grid(row=0, column=0, sticky=W, pady=(10, 0), padx=10)
self.ff_question_author_entry = Entry(self.ff_frame, width=20)
self.ff_question_author_entry.grid(row=0, column=1, sticky=W, pady=(10, 0))
self.ff_question_title_label = Label(self.ff_frame, text="Fragen-Titel")
self.ff_question_title_label.grid(row=1, column=0, sticky=W, padx=10, pady=(10, 0))
self.ff_question_title_entry = Entry(self.ff_frame, width=60)
self.ff_question_title_entry.grid(row=1, column=1, sticky=W, pady=(10, 0))
self.ff_question_description_title_label = Label(self.ff_frame, text="Fragen-Beschreibung")
self.ff_question_description_title_label.grid(row=2, column=0, sticky=W, padx=10)
self.ff_question_description_title_entry = Entry(self.ff_frame, width=60)
self.ff_question_description_title_entry.grid(row=2, column=1, sticky=W)
self.ff_question_textfield_label = Label(self.ff_frame, text="Fragen-Text")
self.ff_question_textfield_label.grid(row=3, column=0, sticky=W, padx=10)
self.ff_bar = Scrollbar(self.ff_frame)
self.ff_question_description_main_entry = Text(self.ff_frame, height=6, width=65, font=('Helvetica', 9))
self.ff_bar.grid(row=3, column=2, sticky=W)
self.ff_question_description_main_entry.grid(row=3, column=1, pady=10, sticky=W)
self.ff_bar.config(command=self.ff_question_description_main_entry.yview)
self.ff_question_description_main_entry.config(yscrollcommand=self.ff_bar.set)
        ############## PROCESSING TIME
self.ff_processing_time_label = Label(self.ff_frame, text="Bearbeitungsdauer")
self.ff_processing_time_label.grid(row=4, column=0, sticky=W, pady=(5, 0), padx=10)
self.ff_processing_time_label = Label(self.ff_frame, text="Std:")
self.ff_processing_time_label.grid(row=4, column=1, sticky=W, pady=(5, 0))
self.ff_processing_time_label = Label(self.ff_frame, text="Min:")
self.ff_processing_time_label.grid(row=4, column=1, sticky=W, padx=70, pady=(5, 0))
self.ff_processing_time_label = Label(self.ff_frame, text="Sek:")
self.ff_processing_time_label.grid(row=4, column=1, sticky=W, padx=145, pady=(5, 0))
self.ff_processingtime_hours = list(range(24))
self.ff_processingtime_minutes = list(range(60))
self.ff_processingtime_seconds = list(range(60))
self.ff_proc_hours_box = ttk.Combobox(self.ff_frame, value=self.ff_processingtime_hours, width=2)
self.ff_proc_minutes_box = ttk.Combobox(self.ff_frame, value=self.ff_processingtime_minutes, width=2)
self.ff_proc_seconds_box = ttk.Combobox(self.ff_frame, value=self.ff_processingtime_seconds, width=2)
self.ff_proc_hours_box.current(23)
self.ff_proc_minutes_box.current(0)
self.ff_proc_seconds_box.current(0)
def selected_hours(event):
self.selected_hours = self.ff_proc_hours_box.get()
def selected_minutes(event):
self.selected_minutes = self.ff_proc_minutes_box.get()
def selected_seconds(event):
self.selected_seconds = self.ff_proc_seconds_box.get()
self.ff_proc_hours_box.bind("<<ComboboxSelected>>", selected_hours)
self.ff_proc_minutes_box.bind("<<ComboboxSelected>>", selected_minutes)
self.ff_proc_seconds_box.bind("<<ComboboxSelected>>", selected_seconds)
self.ff_proc_hours_box.grid(row=4, column=1, sticky=W, padx=25, pady=(5, 0))
self.ff_proc_minutes_box.grid(row=4, column=1, sticky=W, padx=100, pady=(5, 0))
self.ff_proc_seconds_box.grid(row=4, column=1, sticky=W, padx=170, pady=(5, 0))
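        # Note: ttk.Combobox.get() returns strings, so self.selected_hours/minutes/
        # seconds hold values like "23"; convert with int(...) before arithmetic
        # (as ff_answer_selected below does for the variable count).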
        ########################### HEADINGS / LABELS FOR THE ENTRY-FIELD MATRIX ##############################
self.var_min_label = Label(self.ff_frame, text=' Min.')
self.var_min_label.grid(row=5, column=1, sticky=W, pady=(20, 0), padx=60)
self.var_max_label = Label(self.ff_frame, text=' Max.')
self.var_max_label.grid(row=5, column=1, sticky=W, pady=(20, 0), padx=100)
self.var_prec_label = Label(self.ff_frame, text=' Präz.')
self.var_prec_label.grid(row=5, column=1, sticky=W, pady=(20, 0), padx=140)
self.var_divby_label = Label(self.ff_frame, text=' Teilbar\ndurch')
self.var_divby_label.grid(row=5, column=1, sticky=W, pady=(20, 0), padx=180)
self.variable1_label = Label(self.ff_frame, text='Variable 1')
self.variable2_label = Label(self.ff_frame, text='Variable 2')
self.variable3_label = Label(self.ff_frame, text='Variable 3')
self.variable4_label = Label(self.ff_frame, text='Variable 4')
self.variable5_label = Label(self.ff_frame, text='Variable 5')
self.variable6_label = Label(self.ff_frame, text='Variable 6')
self.variable7_label = Label(self.ff_frame, text='Variable 7')
self.variable8_label = Label(self.ff_frame, text='Variable 8')
self.variable9_label = Label(self.ff_frame, text='Variable 9')
self.variable10_label = Label(self.ff_frame, text='Variable 10')
self.variable11_label = Label(self.ff_frame, text='Variable 11')
self.variable12_label = Label(self.ff_frame, text='Variable 12')
self.variable13_label = Label(self.ff_frame, text='Variable 13')
self.variable14_label = Label(self.ff_frame, text='Variable 14')
self.variable15_label = Label(self.ff_frame, text='Variable 15')
        # The label for Var1 is always active/visible. Var2-15 are shown/hidden depending on the selection
self.variable1_label.grid(row=6, column=0, sticky=W, padx=20)
        ########################### ENTRY FIELDS / ENTRIES FOR THE ENTRY-FIELD MATRIX ##############################
self.var1_name_entry = Entry(self.ff_frame, width=6)
self.var1_min_entry = Entry(self.ff_frame, width=6)
self.var1_max_entry = Entry(self.ff_frame, width=6)
self.var1_prec_entry = Entry(self.ff_frame, width=6)
self.var1_divby_entry = Entry(self.ff_frame, width=6)
self.var2_name_entry = Entry(self.ff_frame, width=6)
self.var2_min_entry = Entry(self.ff_frame, width=6)
self.var2_max_entry = Entry(self.ff_frame, width=6)
self.var2_prec_entry = Entry(self.ff_frame, width=6)
self.var2_divby_entry = Entry(self.ff_frame, width=6)
self.var3_name_entry = Entry(self.ff_frame, width=6)
self.var3_min_entry = Entry(self.ff_frame, width=6)
self.var3_max_entry = Entry(self.ff_frame, width=6)
self.var3_prec_entry = Entry(self.ff_frame, width=6)
self.var3_divby_entry = Entry(self.ff_frame, width=6)
self.var4_name_entry = Entry(self.ff_frame, width=6)
self.var4_min_entry = Entry(self.ff_frame, width=6)
self.var4_max_entry = Entry(self.ff_frame, width=6)
self.var4_prec_entry = Entry(self.ff_frame, width=6)
self.var4_divby_entry = Entry(self.ff_frame, width=6)
self.var5_name_entry = Entry(self.ff_frame, width=6)
self.var5_min_entry = Entry(self.ff_frame, width=6)
self.var5_max_entry = Entry(self.ff_frame, width=6)
self.var5_prec_entry = Entry(self.ff_frame, width=6)
self.var5_divby_entry = Entry(self.ff_frame, width=6)
self.var6_name_entry = Entry(self.ff_frame, width=6)
self.var6_min_entry = Entry(self.ff_frame, width=6)
self.var6_max_entry = Entry(self.ff_frame, width=6)
self.var6_prec_entry = Entry(self.ff_frame, width=6)
self.var6_divby_entry = Entry(self.ff_frame, width=6)
self.var7_name_entry = Entry(self.ff_frame, width=6)
self.var7_min_entry = Entry(self.ff_frame, width=6)
self.var7_max_entry = Entry(self.ff_frame, width=6)
self.var7_prec_entry = Entry(self.ff_frame, width=6)
self.var7_divby_entry = Entry(self.ff_frame, width=6)
self.var8_name_entry = Entry(self.ff_frame, width=6)
self.var8_min_entry = Entry(self.ff_frame, width=6)
self.var8_max_entry = Entry(self.ff_frame, width=6)
self.var8_prec_entry = Entry(self.ff_frame, width=6)
self.var8_divby_entry = Entry(self.ff_frame, width=6)
self.var9_name_entry = Entry(self.ff_frame, width=6)
self.var9_min_entry = Entry(self.ff_frame, width=6)
self.var9_max_entry = Entry(self.ff_frame, width=6)
self.var9_prec_entry = Entry(self.ff_frame, width=6)
self.var9_divby_entry = Entry(self.ff_frame, width=6)
self.var10_name_entry = Entry(self.ff_frame, width=6)
self.var10_min_entry = Entry(self.ff_frame, width=6)
self.var10_max_entry = Entry(self.ff_frame, width=6)
self.var10_prec_entry = Entry(self.ff_frame, width=6)
self.var10_divby_entry = Entry(self.ff_frame, width=6)
self.var11_name_entry = Entry(self.ff_frame, width=6)
self.var11_min_entry = Entry(self.ff_frame, width=6)
self.var11_max_entry = Entry(self.ff_frame, width=6)
self.var11_prec_entry = Entry(self.ff_frame, width=6)
self.var11_divby_entry = Entry(self.ff_frame, width=6)
self.var12_name_entry = Entry(self.ff_frame, width=6)
self.var12_min_entry = Entry(self.ff_frame, width=6)
self.var12_max_entry = Entry(self.ff_frame, width=6)
self.var12_prec_entry = Entry(self.ff_frame, width=6)
self.var12_divby_entry = Entry(self.ff_frame, width=6)
self.var13_name_entry = Entry(self.ff_frame, width=6)
self.var13_min_entry = Entry(self.ff_frame, width=6)
self.var13_max_entry = Entry(self.ff_frame, width=6)
self.var13_prec_entry = Entry(self.ff_frame, width=6)
self.var13_divby_entry = Entry(self.ff_frame, width=6)
self.var14_name_entry = Entry(self.ff_frame, width=6)
self.var14_min_entry = Entry(self.ff_frame, width=6)
self.var14_max_entry = Entry(self.ff_frame, width=6)
self.var14_prec_entry = Entry(self.ff_frame, width=6)
self.var14_divby_entry = Entry(self.ff_frame, width=6)
self.var15_name_entry = Entry(self.ff_frame, width=6)
self.var15_min_entry = Entry(self.ff_frame, width=6)
self.var15_max_entry = Entry(self.ff_frame, width=6)
self.var15_prec_entry = Entry(self.ff_frame, width=6)
self.var15_divby_entry = Entry(self.ff_frame, width=6)
        # collect the variable entries in lists
        # The lists have the advantage that a loop can be used when placing the widgets on the GUI
self.var_label_list = [self.variable1_label, self.variable2_label, self.variable3_label, self.variable4_label, self.variable5_label, self.variable6_label, self.variable7_label,
self.variable8_label, self.variable9_label, self.variable10_label, self.variable11_label, self.variable12_label, self.variable13_label, self.variable14_label, self.variable15_label]
self.var_name_entry_list = [self.var1_name_entry, self.var2_name_entry, self.var3_name_entry, self.var4_name_entry, self.var5_name_entry, self.var6_name_entry, self.var7_name_entry,
self.var8_name_entry, self.var9_name_entry, self.var10_name_entry, self.var11_name_entry, self.var12_name_entry, self.var13_name_entry, self.var14_name_entry, self.var15_name_entry]
self.var_min_entry_list = [self.var1_min_entry, self.var2_min_entry, self.var3_min_entry, self.var4_min_entry, self.var5_min_entry, self.var6_min_entry, self.var7_min_entry,
self.var8_min_entry, self.var9_min_entry, self.var10_min_entry, self.var11_min_entry, self.var12_min_entry, self.var13_min_entry, self.var14_min_entry, self.var15_min_entry]
self.var_max_entry_list = [self.var1_max_entry, self.var2_max_entry, self.var3_max_entry, self.var4_max_entry, self.var5_max_entry, self.var6_max_entry, self.var7_max_entry,
self.var8_max_entry, self.var9_max_entry, self.var10_max_entry, self.var11_max_entry, self.var12_max_entry, self.var13_max_entry, self.var14_max_entry, self.var15_max_entry]
self.var_prec_entry_list = [self.var1_prec_entry, self.var2_prec_entry, self.var3_prec_entry, self.var4_prec_entry, self.var5_prec_entry, self.var6_prec_entry, self.var7_prec_entry,
self.var8_prec_entry, self.var9_prec_entry, self.var10_prec_entry, self.var11_prec_entry, self.var12_prec_entry, self.var13_prec_entry, self.var14_prec_entry, self.var15_prec_entry]
self.var_divby_entry_list = [self.var1_divby_entry, self.var2_divby_entry, self.var3_divby_entry, self.var4_divby_entry, self.var5_divby_entry, self.var6_divby_entry, self.var7_divby_entry,
self.var8_divby_entry, self.var9_divby_entry, self.var10_divby_entry, self.var11_divby_entry, self.var12_divby_entry, self.var13_divby_entry, self.var14_divby_entry, self.var15_divby_entry]
        # The entry fields for Var1 are always active/visible. Var2-15 are shown/hidden depending on the selection
self.var1_name_entry.grid(row=6, column=1, sticky=W)
self.var1_min_entry.grid(row=6, column=1, sticky=W, padx=60)
self.var1_max_entry.grid(row=6, column=1, sticky=W, padx=100)
self.var1_prec_entry.grid(row=6, column=1, sticky=W, padx=140)
self.var1_divby_entry.grid(row=6, column=1, sticky=W, padx=180)
        # calculate the value range for the formula from entry field: formula 1
self.calculate_value_range_btn = Button(self.ff_frame, text="Wertebereich berechnen",command=lambda: Formelfrage.ff_calculate_value_range_function_in_GUI(self, "0"))
self.calculate_value_range_btn.grid(row=6, column=1, padx=50, sticky="E")
        ########################### SHOW/HIDE THE ENTRY-FIELD MATRIX (VARIABLES) ##############################
        # Here the function "ff_answer_selected" shows/hides the variable entry fields (depending on the selected value)
        def ff_answer_selected(event):  # the "event" parameter is required for the combobox binding
self.selected_number_of_variables = int(self.ff_numbers_of_answers_box.get())
            # loop to place the entries on the GUI
            # If 5 variables are selected, rows 1-5 are placed on the GUI
for i in range(self.selected_number_of_variables):
Formelfrage.ff_variable_show_or_remove(self, self.var_label_list[i], self.var_name_entry_list[i], self.var_min_entry_list[i], self.var_max_entry_list[i], self.var_prec_entry_list[i], self.var_divby_entry_list[i], str(i+7), "show")
            # loop to hide the entries on the GUI
            # If 5 variables are selected, rows 6-15 are hidden on the GUI
for j in range(self.selected_number_of_variables, len(self.var_min_entry_list)):
Formelfrage.ff_variable_show_or_remove(self, self.var_label_list[j], self.var_name_entry_list[j], self.var_min_entry_list[j], self.var_max_entry_list[j], self.var_prec_entry_list[j], self.var_divby_entry_list[j], str(j+7), "remove")
self.ff_numbers_of_answers_box_label = Label(self.ff_frame, text="Anzahl der Variablen: ")
self.ff_numbers_of_answers_box_label.grid(row=5, column=0, sticky=W, padx=10, pady=(20, 0))
self.ff_numbers_of_answers_value = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"]
self.ff_numbers_of_answers_box = ttk.Combobox(self.ff_frame, value=self.ff_numbers_of_answers_value, width=3)
self.ff_numbers_of_answers_box.bind("<<ComboboxSelected>>", ff_answer_selected)
self.ff_numbers_of_answers_box.grid(row=5, column=1, sticky=W, pady=(20, 0))
self.ff_numbers_of_answers_box.current(0)
        ########################### UNIT SELECTION FOR VARIABLES ---- CURRENTLY INACTIVE ##############################
self.select_var_units = ["Unit", "H", "mH", "µH", "nH", "pH", "---", "F", "mF", "µF", "nF", "pF", "---", "MV", "kV", "V", "mV", "µV", "---"]
self.var1_unit_myCombo = ttk.Combobox(self.ff_frame, value=self.select_var_units, width=5)
self.var1_unit_myCombo.current(0)
self.var2_unit_myCombo = ttk.Combobox(self.ff_frame, value=self.select_var_units, width=5)
self.var2_unit_myCombo.current(0)
self.var3_unit_myCombo = ttk.Combobox(self.ff_frame, value=self.select_var_units, width=5)
self.var3_unit_myCombo.current(0)
self.var4_unit_myCombo = ttk.Combobox(self.ff_frame, value=self.select_var_units, width=5)
self.var4_unit_myCombo.current(0)
self.var5_unit_myCombo = ttk.Combobox(self.ff_frame, value=self.select_var_units, width=5)
self.var5_unit_myCombo.current(0)
self.var6_unit_myCombo = ttk.Combobox(self.ff_frame, value=self.select_var_units, width=5)
self.var6_unit_myCombo.current(0)
self.var7_unit_myCombo = ttk.Combobox(self.ff_frame, value=self.select_var_units, width=5)
self.var7_unit_myCombo.current(0)
        ########################### HEADINGS / LABELS FOR THE ENTRY-FIELD MATRIX ##############################
self.res_min_label = Label(self.ff_frame, text=' Min.')
self.res_max_label = Label(self.ff_frame, text=' Max.')
self.res_prec_label = Label(self.ff_frame, text=' Präz.')
self.res_tol_label = Label(self.ff_frame, text=' Tol.')
self.res_points_label = Label(self.ff_frame, text='Punkte')
self.res_formula_label = Label(self.ff_frame, text='Formel')
self.res_min_label.grid(row=40, column=1, sticky=W, pady=(10, 0), padx=60)
self.res_max_label.grid(row=40, column=1, sticky=W, pady=(10, 0), padx=100)
self.res_prec_label.grid(row=40, column=1, sticky=W, pady=(10, 0), padx=140)
self.res_tol_label.grid(row=40, column=1, sticky=W, pady=(10, 0), padx=180)
self.res_points_label.grid(row=40, column=1, sticky=W, pady=(10, 0), padx=220)
self.res_formula_label.grid(row=40, column=1, sticky=E, pady=(10, 0), padx=100)
self.result1_label = Label(self.ff_frame, text='Ergebnis 1')
self.result2_label = Label(self.ff_frame, text='Ergebnis 2')
self.result3_label = Label(self.ff_frame, text='Ergebnis 3')
self.result4_label = Label(self.ff_frame, text='Ergebnis 4')
self.result5_label = Label(self.ff_frame, text='Ergebnis 5')
self.result6_label = Label(self.ff_frame, text='Ergebnis 6')
self.result7_label = Label(self.ff_frame, text='Ergebnis 7')
self.result8_label = Label(self.ff_frame, text='Ergebnis 8')
self.result9_label = Label(self.ff_frame, text='Ergebnis 9')
self.result10_label = Label(self.ff_frame, text='Ergebnis 10')
        # The label for Res1 is always active/visible. Res2-10 are shown/hidden depending on the selection
self.result1_label.grid(row=41, column=0, sticky=W, padx=20)
        ########################### ENTRY FIELDS / ENTRIES FOR THE ENTRY-FIELD MATRIX ##############################
self.res1_name_entry = Entry(self.ff_frame, width=6)
self.res1_min_entry = Entry(self.ff_frame, width=6)
self.res1_max_entry = Entry(self.ff_frame, width=6)
self.res1_prec_entry = Entry(self.ff_frame, width=6)
self.res1_tol_entry = Entry(self.ff_frame, width=6)
self.res1_points_entry = Entry(self.ff_frame, width=6)
self.res1_formula_entry = Entry(self.ff_frame, width=30)
self.res2_name_entry = Entry(self.ff_frame, width=6)
self.res2_min_entry = Entry(self.ff_frame, width=6)
self.res2_max_entry = Entry(self.ff_frame, width=6)
self.res2_prec_entry = Entry(self.ff_frame, width=6)
self.res2_tol_entry = Entry(self.ff_frame, width=6)
self.res2_points_entry = Entry(self.ff_frame, width=6)
self.res2_formula_entry = Entry(self.ff_frame, width=30)
self.res3_name_entry = Entry(self.ff_frame, width=6)
self.res3_min_entry = Entry(self.ff_frame, width=6)
self.res3_max_entry = Entry(self.ff_frame, width=6)
self.res3_prec_entry = Entry(self.ff_frame, width=6)
self.res3_tol_entry = Entry(self.ff_frame, width=6)
self.res3_points_entry = Entry(self.ff_frame, width=6)
self.res3_formula_entry = Entry(self.ff_frame, width=30)
self.res4_name_entry = Entry(self.ff_frame, width=6)
self.res4_min_entry = Entry(self.ff_frame, width=6)
self.res4_max_entry = Entry(self.ff_frame, width=6)
self.res4_prec_entry = Entry(self.ff_frame, width=6)
self.res4_tol_entry = Entry(self.ff_frame, width=6)
self.res4_points_entry = Entry(self.ff_frame, width=6)
self.res4_formula_entry = Entry(self.ff_frame, width=30)
self.res5_name_entry = Entry(self.ff_frame, width=6)
self.res5_min_entry = Entry(self.ff_frame, width=6)
self.res5_max_entry = Entry(self.ff_frame, width=6)
self.res5_prec_entry = Entry(self.ff_frame, width=6)
self.res5_tol_entry = Entry(self.ff_frame, width=6)
self.res5_points_entry = Entry(self.ff_frame, width=6)
self.res5_formula_entry = Entry(self.ff_frame, width=30)
self.res6_name_entry = Entry(self.ff_frame, width=6)
self.res6_min_entry = Entry(self.ff_frame, width=6)
self.res6_max_entry = Entry(self.ff_frame, width=6)
self.res6_prec_entry = Entry(self.ff_frame, width=6)
self.res6_tol_entry = Entry(self.ff_frame, width=6)
self.res6_points_entry = Entry(self.ff_frame, width=6)
self.res6_formula_entry = Entry(self.ff_frame, width=30)
self.res7_name_entry = Entry(self.ff_frame, width=6)
self.res7_min_entry = Entry(self.ff_frame, width=6)
self.res7_max_entry = Entry(self.ff_frame, width=6)
self.res7_prec_entry = Entry(self.ff_frame, width=6)
self.res7_tol_entry = Entry(self.ff_frame, width=6)
self.res7_points_entry = Entry(self.ff_frame, width=6)
self.res7_formula_entry = Entry(self.ff_frame, width=30)
self.res8_name_entry = Entry(self.ff_frame, width=6)
self.res8_min_entry = Entry(self.ff_frame, width=6)
self.res8_max_entry = Entry(self.ff_frame, width=6)
self.res8_prec_entry = Entry(self.ff_frame, width=6)
self.res8_tol_entry = Entry(self.ff_frame, width=6)
self.res8_points_entry = Entry(self.ff_frame, width=6)
self.res8_formula_entry = Entry(self.ff_frame, width=30)
self.res9_name_entry = Entry(self.ff_frame, width=6)
self.res9_min_entry = Entry(self.ff_frame, width=6)
self.res9_max_entry = Entry(self.ff_frame, width=6)
self.res9_prec_entry = Entry(self.ff_frame, width=6)
self.res9_tol_entry = Entry(self.ff_frame, width=6)
self.res9_points_entry = Entry(self.ff_frame, width=6)
self.res9_formula_entry = Entry(self.ff_frame, width=30)
self.res10_name_entry = Entry(self.ff_frame, width=6)
self.res10_min_entry = Entry(self.ff_frame, width=6)
self.res10_max_entry = Entry(self.ff_frame, width=6)
self.res10_prec_entry = Entry(self.ff_frame, width=6)
self.res10_tol_entry = Entry(self.ff_frame, width=6)
self.res10_points_entry = Entry(self.ff_frame, width=6)
self.res10_formula_entry = Entry(self.ff_frame, width=30)
        # The entry fields for Res1 are always active/visible. Res2-10 are shown/hidden depending on the selection
self.res1_name_entry.grid(row=41, column=1, sticky=W)
self.res1_min_entry.grid(row=41, column=1, sticky=W, padx=60)
self.res1_max_entry.grid(row=41, column=1, sticky=W, padx=100)
self.res1_prec_entry.grid(row=41, column=1, sticky=W, padx=140)
self.res1_tol_entry.grid(row=41, column=1, sticky=W, padx=180)
self.res1_points_entry.grid(row=41, column=1, sticky=W, padx=220)
self.res1_formula_entry.grid(row=41, column=1, sticky=E, padx=20)
        # Collect the result entries in lists
        # The lists make it possible to use a loop when placing the widgets on the GUI
self.res_label_list = [self.result1_label, self.result2_label, self.result3_label, self.result4_label, self.result5_label,
self.result6_label, self.result7_label, self.result8_label, self.result9_label, self.result10_label]
self.res_name_entry_list = [self.res1_name_entry, self.res2_name_entry, self.res3_name_entry, self.res4_name_entry, self.res5_name_entry,
self.res6_name_entry, self.res7_name_entry, self.res8_name_entry, self.res9_name_entry, self.res10_name_entry]
self.res_min_entry_list = [self.res1_min_entry, self.res2_min_entry, self.res3_min_entry, self.res4_min_entry, self.res5_min_entry,
self.res6_min_entry, self.res7_min_entry, self.res8_min_entry, self.res9_min_entry, self.res10_min_entry]
self.res_max_entry_list = [self.res1_max_entry, self.res2_max_entry, self.res3_max_entry, self.res4_max_entry, self.res5_max_entry,
self.res6_max_entry, self.res7_max_entry, self.res8_max_entry, self.res9_max_entry, self.res10_max_entry]
self.res_prec_entry_list = [self.res1_prec_entry, self.res2_prec_entry, self.res3_prec_entry, self.res4_prec_entry, self.res5_prec_entry,
self.res6_prec_entry, self.res7_prec_entry, self.res8_prec_entry, self.res9_prec_entry, self.res10_prec_entry]
self.res_tol_entry_list = [self.res1_tol_entry, self.res2_tol_entry, self.res3_tol_entry, self.res4_tol_entry, self.res5_tol_entry,
self.res6_tol_entry, self.res7_tol_entry, self.res8_tol_entry, self.res9_tol_entry, self.res10_tol_entry]
self.res_points_entry_list = [self.res1_points_entry, self.res2_points_entry, self.res3_points_entry, self.res4_points_entry, self.res5_points_entry,
self.res6_points_entry, self.res7_points_entry, self.res8_points_entry, self.res9_points_entry, self.res10_points_entry]
self.res_formula_entry_list = [self.res1_formula_entry, self.res2_formula_entry, self.res3_formula_entry, self.res4_formula_entry, self.res5_formula_entry,
self.res6_formula_entry, self.res7_formula_entry, self.res8_formula_entry, self.res9_formula_entry, self.res10_formula_entry]
        # These lists are used for the value-range calculation
self.var_res_combined_min_entries_list = [self.var1_min_entry, self.var2_min_entry, self.var3_min_entry, self.var4_min_entry,
self.var5_min_entry, self.var6_min_entry, self.var7_min_entry,
self.var8_min_entry, self.var9_min_entry, self.var10_min_entry, self.var11_min_entry,
self.var12_min_entry, self.var13_min_entry, self.var14_min_entry,
self.var15_min_entry, self.res1_min_entry, self.res2_min_entry, self.res3_min_entry,
self.res4_min_entry, self.res5_min_entry, self.res6_min_entry, self.res7_min_entry,
self.res8_min_entry, self.res9_min_entry, self.res10_min_entry ]
self.var_res_combined_max_entries_list = [self.var1_max_entry, self.var2_max_entry, self.var3_max_entry, self.var4_max_entry,
self.var5_max_entry, self.var6_max_entry, self.var7_max_entry,
self.var8_max_entry, self.var9_max_entry, self.var10_max_entry, self.var11_max_entry,
self.var12_max_entry, self.var13_max_entry, self.var14_max_entry,
self.var15_max_entry, self.res1_max_entry, self.res2_max_entry, self.res3_max_entry, self.res4_max_entry,
self.res5_max_entry,
self.res6_max_entry, self.res7_max_entry, self.res8_max_entry, self.res9_max_entry,
self.res10_max_entry]
#############################
        #################### UNITS FOR RESULTS ARE CURRENTLY DISABLED
# self.res1_unit_myCombo = ttk.Combobox(self.frame_formula, value=self.select_var_units, width=5)
# self.res1_unit_myCombo.current(0)
# self.res1_unit_myCombo.bind("<<ComboboxSelected>>", selected_var)
# #self.res1_unit_myCombo.grid(row=21, column=0, sticky=E, padx=10)
#
# self.res2_unit_myCombo = ttk.Combobox(self.frame_formula, value=self.select_var_units, width=5)
# self.res2_unit_myCombo.current(0)
# self.res2_unit_myCombo.bind("<<ComboboxSelected>>", selected_var)
#
# self.res3_unit_myCombo = ttk.Combobox(self.frame_formula, value=self.select_var_units, width=5)
# self.res3_unit_myCombo.current(0)
# self.res3_unit_myCombo.bind("<<ComboboxSelected>>", selected_var)
        # The function "ff_result_selected" shows or hides the result entry fields depending on the selected value
        def ff_result_selected(event):  # the "event" parameter is required for the combobox binding
self.selected_number_of_results = int(self.ff_numbers_of_results_box.get())
            # Loop that places the entries on the GUI
            # e.g. with 5 results selected, result rows 1-5 are placed (grid rows 42-46)
for i in range(self.selected_number_of_results):
#Formelfrage.ff_variable_show_or_remove(self, self.var_label_list[i], self.var_name_entry_list[i], self.var_min_entry_list[i], self.var_max_entry_list[i], self.var_prec_entry_list[i], self.var_divby_entry_list[i], str(i+7), "show")
Formelfrage.ff_result_show_or_remove(self, self.res_label_list[i], self.res_name_entry_list[i], self.res_min_entry_list[i], self.res_max_entry_list[i], self.res_prec_entry_list[i], self.res_tol_entry_list[i], self.res_points_entry_list[i], self.res_formula_entry_list[i], str(i+42), "show")
            # Loop that hides the remaining entries on the GUI
            # e.g. with 5 results selected, result rows 6-10 are hidden
for j in range(self.selected_number_of_results, len(self.res_min_entry_list)):
Formelfrage.ff_result_show_or_remove(self, self.res_label_list[j], self.res_name_entry_list[j], self.res_min_entry_list[j], self.res_max_entry_list[j], self.res_prec_entry_list[j], self.res_tol_entry_list[j], self.res_points_entry_list[j], self.res_formula_entry_list[j], str(j+42), "remove")
self.ff_numbers_of_results_box_label = Label(self.ff_frame, text="Anzahl der Ergebnisse: ")
self.ff_numbers_of_results_box_label.grid(row=40, column=0, sticky=W, padx=10, pady=(20, 0))
self.ff_numbers_of_results_value = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
self.ff_numbers_of_results_box = ttk.Combobox(self.ff_frame, value=self.ff_numbers_of_results_value, width=3)
self.ff_numbers_of_results_box.current(0)
self.ff_numbers_of_results_box.bind("<<ComboboxSelected>>", ff_result_selected)
self.ff_numbers_of_results_box.grid(row=40, column=1, sticky=W, pady=(20, 0))
def ff_variable_show_or_remove(self, var_label, var_name_entry, var_min_entry, var_max_entry, var_prec_entry, var_divby_entry, row_nr, var_status):
if var_status == "show":
var_label.grid(row=int(row_nr), column=0, sticky=W, padx=20)
var_name_entry.grid(row=int(row_nr), column=1, sticky=W)
var_min_entry.grid(row=int(row_nr), column=1, sticky=W, padx=60)
var_max_entry.grid(row=int(row_nr), column=1, sticky=W, padx=100)
var_prec_entry.grid(row=int(row_nr), column=1, sticky=W, padx=140)
var_divby_entry.grid(row=int(row_nr), column=1, sticky=W, padx=180)
#var_unit_myCombo.grid(row=int(row_nr), column=0, sticky=E, padx=10)
else:
var_label.grid_remove()
var_name_entry.grid_remove()
var_min_entry.grid_remove()
var_max_entry.grid_remove()
var_prec_entry.grid_remove()
var_divby_entry.grid_remove()
# var_unit_myCombo.grid_remove()
def ff_result_show_or_remove(self, res_label, res_name_entry, res_min_entry, res_max_entry, res_prec_entry, res_tol_entry, res_points_entry, res_formula_entry, row_nr, res_status):
if res_status == "show":
res_label.grid(row=int(row_nr), column=0, sticky=W, padx=20)
res_name_entry.grid(row=int(row_nr), column=1, sticky=W)
res_min_entry.grid(row=int(row_nr), column=1, sticky=W, padx=60)
res_max_entry.grid(row=int(row_nr), column=1, sticky=W, padx=100)
res_prec_entry.grid(row=int(row_nr), column=1, sticky=W, padx=140)
res_tol_entry.grid(row=int(row_nr), column=1, sticky=W, padx=180)
res_points_entry.grid(row=int(row_nr), column=1, sticky=W, padx=220)
res_formula_entry.grid(row=int(row_nr), column=1, sticky=E, padx=20)
#res_unit_myCombo.grid(row=int(row_nr), column=0, sticky=E, padx=10)
else:
res_label.grid_remove()
res_name_entry.grid_remove()
res_min_entry.grid_remove()
res_max_entry.grid_remove()
res_prec_entry.grid_remove()
res_tol_entry.grid_remove()
res_points_entry.grid_remove()
res_formula_entry.grid_remove()
#var_unit_myCombo.grid_remove()
def unit_table(self, selected_unit):
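        """Map a human-readable unit (e.g. "mA") to the numeric code ILIAS expects.

        Example based on the table below: unit_table(self, "kOhm") returns "150".
        """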
self.unit_to_ilias_code = { "H" : "125", "mH" : "126", "µH" : "127", "nH" : "128", "kH" : "129", "pH" : "130",
"F" : "131", "mF" : "132", "µF" : "133", "nF" : "134", "kF" : "135",
"W" : "136", "kW" : "137", "MW" : "138", "mW" : "149",
"V" : "139", "kV" : "140", "mV" : "141", "µV" : "142", "MV" : "143",
"A" : "144", "mA" : "145", "µA" : "146", "kA" : "147",
"Ohm" : "148", "kOhm" : "150", "mOhm" : "151"}
self.var_selected_unit = selected_unit
self.selected_unit = self.unit_to_ilias_code[self.var_selected_unit]
return self.selected_unit
def ff_replace_character_in_xml_file(self, file_path_qti_xml):
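        """Swap every "&amp;" in the finished QTI XML back to a literal "&".

        Example: '<text>R1 &amp; R2</text>' becomes '<text>R1 & R2</text>'.
        """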
        # Afterwards all "&amp;" are replaced with "&" again
        # XML cannot process a bare "&", so "&" was swapped for "&amp;" when the texts were written to the XML
        # Open the XML file for reading ('r' -> "read")
with open(file_path_qti_xml, 'r') as xml_file:
xml_str = xml_file.read()
            xml_str = xml_str.replace('&amp;', '&')  # restore the literal '&' characters
        # Open the XML file for writing ('w' -> "write")
with open(file_path_qti_xml, 'w') as replaced_xml_file:
replaced_xml_file.write(xml_str)
        print("...XML_FILE_QTI -- \"&\"-CHARACTER REWORK COMPLETED!")
    # Compute the value range (runs in acceptable time for up to 4 variables)
def ff_calculate_value_range_function_in_GUI(self, ids_in_entry_box):
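        """Compute the value range for the result formulas shown in the GUI.

        If the "calculate from DB entries" checkbox is set, the comma-separated
        oids in ids_in_entry_box are loaded from the database one after another,
        their ranges are computed and written back to the DB; otherwise only the
        formulas currently entered in the GUI are evaluated.
        """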
self.all_entries_from_db_list = []
if self.ff_var_calculate_value_range_from_db_entries_check.get() == 1:
self.ff_test_entry_splitted = ids_in_entry_box.split(",")
            print("Creating question pool")
            # Check whether all entries are to be generated (checkbox set)
if self.ff_var_create_question_pool_all_check.get() == 1 and self.ff_var_create_multiple_question_pools_from_tax_check.get() == 0:
conn = sqlite3.connect(self.database_formelfrage_path)
c = conn.cursor()
c.execute("SELECT *, oid FROM %s" % self.ff_database_table)
ff_db_records = c.fetchall()
for ff_db_record in ff_db_records:
self.all_entries_from_db_list.append(int(ff_db_record[len(ff_db_record) - 1]))
self.string_temp = ','.join(map(str, self.all_entries_from_db_list))
self.ff_test_entry_splitted = self.string_temp.split(",")
                # The entry with ID "1" is the template and must not be generated
self.ff_test_entry_splitted.pop(0)
            # Connect to the database
conn = sqlite3.connect(self.database_formelfrage_path)
cursor = conn.cursor()
cursor.execute("SELECT *, oid FROM %s" % self.ff_database_table)
ff_db_records = cursor.fetchall()
for i in range(len(self.ff_test_entry_splitted)):
for ff_db_record in ff_db_records:
if str(ff_db_record[len(ff_db_record) - 1]) == self.ff_test_entry_splitted[i]:
Formelfrage.ff_clear_GUI(self)
                        # Populate the variable fields (var1-var15: min/max/precision) from the DB record
                        for n in range(1, 16):
                            for field in ("min", "max", "prec"):
                                getattr(self, "var%d_%s_entry" % (n, field)).insert(
                                    0, ff_db_record[self.ff_db_entry_to_index_dict["var%d_%s" % (n, field)]])
                        # Populate the result fields (res1-res10: formula/min/max/precision) from the DB record
                        for n in range(1, 11):
                            for field in ("formula", "min", "max", "prec"):
                                getattr(self, "res%d_%s_entry" % (n, field)).insert(
                                    0, ff_db_record[self.ff_db_entry_to_index_dict["res%d_%s" % (n, field)]])
print("INSERTED")
                # Evaluate every result formula that is present in its entry field
                for k in range(10):
                    if self.res_formula_entry_list[k].get() != "":
                        Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res_formula_entry_list[k].get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res_min_entry_list[k], self.res_max_entry_list[k], self.res_prec_entry_list[k], self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_from_db_entries_check.get())
if self.ff_var_calculate_value_range_from_db_entries_check.get() == 1:
                    # Connect to the database
conn = sqlite3.connect(self.database_formelfrage_path)
c = conn.cursor()
# sql_update_query = "UPDATE " + self.ff_database_table + " SET res1_min=?, res1_max=? WHERE id=?",( res_min_entry, res_max_entry, record_id)
for t in range(0, 10):
print(t)
c.execute("UPDATE " + self.ff_database_table + " SET res" + str(t+1) + "_min=?, res" + str(t+1) + "_max=? WHERE oid=?", (self.var_res_combined_min_entries_list[t+15].get(), self.var_res_combined_max_entries_list[t+15].get(), self.ff_test_entry_splitted[i]))
conn.commit()
conn.close()
else:
            # Evaluate every result formula that is present in its entry field
            for k in range(10):
                if self.res_formula_entry_list[k].get() != "":
                    Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res_formula_entry_list[k].get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res_min_entry_list[k], self.res_max_entry_list[k], self.res_prec_entry_list[k], self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_from_db_entries_check.get())
def ff_calculate_value_range_replace_formula_numpy(self, formula, var_res_combined_min_entries_list, var_res_combined_max_entries_list, res_min_entries_list, res_max_entries_list):
self.formula = formula
self.formula_var_replaced = formula.replace('$', '_')
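        # The two dictionaries below translate the GUI formula syntax into numpy
        # expressions, e.g. "$v1*2+$v3" -> "_v1*2+_v3" -> " row['a'] *2+ row['c'] ".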
        self.np_variables_translator_dict = {"pi": "np.pi",
                                             ",": ".",
                                             "^": "**",
                                             "e": "*10**",
                                             "sin": "np.sin",
                                             "cos": "np.cos",
                                             "tan": "np.tan",
                                             "arcsin": "np.arcsin",
                                             "arccos": "np.arccos",
                                             "arctan": "np.arctan",
                                             "sinh": "np.sinh",
                                             "cosh": "np.cosh",
                                             "tanh": "np.tanh",
                                             "arcsinh": "np.arcsinh",
                                             "arccosh": "np.arccosh",
                                             "arctanh": "np.arctanh",
                                             "sqrt": "np.sqrt",
                                             "abs": "np.abs",
                                             "ln": "np.log",  # numpy has no np.ln; the natural log is np.log
                                             "log": "np.log",
"_v1": " row['a'] ",
"_v2": " row['b'] ",
"_v3": " row['c'] ",
"_v4": " row['d'] ",
"_v5": " row['e'] ",
"_v6": " row['f'] ",
"_v7": " row['g'] ",
"_v8": " row['h'] ",
"_v9": " row['i'] ",
"_v10": " row['j'] ",
"_v11": " row['k'] ",
"_v12": " row['l'] ",
"_v13": " row['m'] ",
"_v14": " row['n'] ",
"_v15": " row['o'] "}
self.np_results_translator_dict = {
"_r1": " row['p'] ",
"_r2": " row['q'] ",
"_r3": " row['r'] ",
"_r4": " row['s'] ",
"_r5": " row['t'] ",
"_r6": " row['u'] ",
"_r7": " row['v'] ",
"_r8": " row['w'] ",
"_r9": " row['x'] ",
"_r10": " row['y'] "}
        print("----------------------")
        print("Taking the formula from the entry field")
        print("---> ", self.formula)
        print("Checking for bounds")
def replace_var(match):
return self.np_variables_translator_dict[match.group(0)]
def replace_res(match):
return self.np_results_translator_dict[match.group(0)]
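        # One regex pass with \b word boundaries is used instead of chained
        # str.replace calls (see the commented-out loops below) so that e.g.
        # "_v1" is not also replaced inside "_v10".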
print("====OUTPUT===")
self.formula_var_replaced = re.sub('|'.join(r'\b%s\b' % re.escape(s) for s in self.np_variables_translator_dict),replace_var, self.formula_var_replaced)
print(self.formula_var_replaced)
#for key in self.np_variables_translator_dict.keys():
# self.formula_var_replaced = self.formula_var_replaced.replace(key, self.np_variables_translator_dict[key])
self.formula_res_replaced = re.sub('|'.join(r'\b%s\b' % re.escape(s) for s in self.np_results_translator_dict),replace_res, self.formula_var_replaced)
print("FORMULA REPLACED")
print(self.formula_res_replaced)
#for key in self.np_results_translator_dict.keys():
# self.formula_res_replaced = self.formula_res_replaced.replace(key, self.np_results_translator_dict[key])
for i in range(len(var_res_combined_min_entries_list)):
if "$v" + (str(i+1)) in formula and var_res_combined_min_entries_list[i].get() != "" and var_res_combined_max_entries_list[i].get() != "":
self.formula = self.formula_var_replaced
for j in range(len(res_min_entries_list)):
if "$r" + (str(j+1)) in formula:
if res_min_entries_list[j].get() != "" and res_max_entries_list[j].get() != "":
                        print("Bounds available! --> Replacing all symbols with numpy notation")
self.formula = self.formula_res_replaced
else:
self.formula = "NaN"
if "$r" + (str(i+1)) in formula and var_res_combined_min_entries_list[i].get() != "" and var_res_combined_max_entries_list[i].get() != "":
self.formula = self.formula_res_replaced
        print("return formula", self.formula)
return self.formula
def ff_calculate_value_range_from_formula_in_GUI(self, formula, var_res_combined_min_entries_list, var_res_combined_max_entries_list, var_prec_entries_list, res_min_entry, res_max_entry, res_prec_entry, res_min_entries_list, res_max_entries_list, calculate_value_range_for_pool_check):
def value_range_lower_upper_bounds(var_res_combined_min_entries_list, var_res_combined_max_entries_list, var_lower_bound_list, var_upper_bound_list):
for u in range(len(var_res_combined_min_entries_list)):
if var_res_combined_min_entries_list[u] != "":
                    if re.search(r'\d', var_res_combined_min_entries_list[u].get()) and re.search(r'\d', var_res_combined_max_entries_list[u].get()):
try:
var_lower_bound_list[u], var_upper_bound_list[u] = int(var_res_combined_min_entries_list[u].get()), int(var_res_combined_max_entries_list[u].get())
except ValueError:
var_lower_bound_list[u], var_upper_bound_list[u] = float(var_res_combined_min_entries_list[u].get()), float(var_res_combined_max_entries_list[u].get())
else:
var_lower_bound_list[u], var_upper_bound_list[u] = 0, 0
def min_max(col):
return pd.Series(index=['min', 'max'], data=[col.min(), col.max()])
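        # Per-column helper, e.g. min_max(pd.Series([3, 1, 2])) yields min=1, max=3.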
        # Evaluate all formulas that contain NO $r (variables only)
self.var1_lower, self.var1_upper = 0, 0
self.var2_lower, self.var2_upper = 0, 0
self.var3_lower, self.var3_upper = 0, 0
self.var4_lower, self.var4_upper = 0, 0
self.var5_lower, self.var5_upper = 0, 0
self.var6_lower, self.var6_upper = 0, 0
self.var7_lower, self.var7_upper = 0, 0
self.var8_lower, self.var8_upper = 0, 0
self.var9_lower, self.var9_upper = 0, 0
self.var10_lower, self.var10_upper = 0, 0
self.var11_lower, self.var11_upper = 0, 0
self.var12_lower, self.var12_upper = 0, 0
self.var13_lower, self.var13_upper = 0, 0
self.var14_lower, self.var14_upper = 0, 0
self.var15_lower, self.var15_upper = 0, 0
self.res1_lower, self.res1_upper = 0, 0
self.res2_lower, self.res2_upper = 0, 0
self.res3_lower, self.res3_upper = 0, 0
self.res4_lower, self.res4_upper = 0, 0
self.res5_lower, self.res5_upper = 0, 0
self.res6_lower, self.res6_upper = 0, 0
self.res7_lower, self.res7_upper = 0, 0
self.res8_lower, self.res8_upper = 0, 0
self.res9_lower, self.res9_upper = 0, 0
self.res10_lower, self.res10_upper = 0, 0
self.new_list = []
self.new_list2 = []
self.set_nr_of_var_index = []
self.var_prec_entry_list_values = []
self.lower_list = [self.var1_lower, self.var2_lower, self.var3_lower, self.var4_lower, self.var5_lower,
self.var6_lower, self.var7_lower, self.var8_lower, self.var9_lower, self.var10_lower,
self.var11_lower, self.var12_lower, self.var13_lower, self.var14_lower, self.var15_lower,
self.res1_lower, self.res2_lower, self.res3_lower, self.res4_lower, self.res5_lower,
self.res6_lower, self.res7_lower, self.res8_lower, self.res9_lower, self.res10_lower]
self.upper_list = [self.var1_upper, self.var2_upper, self.var3_upper, self.var4_upper, self.var5_upper,
self.var6_upper, self.var7_upper, self.var8_upper, self.var9_upper, self.var10_upper,
self.var11_upper, self.var12_upper, self.var13_upper, self.var14_upper, self.var15_upper,
self.res1_upper, self.res2_upper, self.res3_upper, self.res4_upper, self.res5_upper,
self.res6_upper, self.res7_upper, self.res8_upper, self.res9_upper, self.res10_upper]
self.new_dict = {"row['a']": 'a',
"row['b']": 'b',
"row['c']": 'c',
"row['d']": 'd',
"row['e']": 'e',
"row['f']": 'f',
"row['g']": 'g',
"row['h']": 'h',
"row['i']": 'i',
"row['j']": 'j',
"row['k']": 'k',
"row['l']": 'l',
"row['m']": 'm',
"row['n']": 'n',
"row['o']": 'o',
"row['p']": 'p',
"row['q']": 'q',
"row['r']": 'r',
"row['s']": 's',
"row['t']": 't',
"row['u']": 'u',
"row['v']": 'v',
"row['w']": 'w',
"row['x']": 'x',
"row['y']": 'y' }
self.list_index_dict = {'a': 0,
'b': 1,
'c': 2,
'd': 3,
'e': 4,
'f': 5,
'g': 6,
'h': 7,
'i': 8,
'j': 9,
'k': 10,
'l': 11,
'm': 12,
'n': 13,
'o': 14,
'p': 15,
'q': 16,
'r': 17,
's': 18,
't': 19,
'u': 20,
'v': 21,
'w': 22,
'x': 23,
'y': 24,
}
values = []
# Number of values per range
N = 5
        # Translate the formula into numpy expressions: e.g. 2^5 -> 2**5, $v1*2+$v3 -> row[a] *2+ row[c]
self.formula_1_numpy_expression = Formelfrage.ff_calculate_value_range_replace_formula_numpy(self, formula, var_res_combined_min_entries_list, var_res_combined_max_entries_list, res_min_entries_list, res_max_entries_list)
        if self.formula_1_numpy_expression is not None and self.formula_1_numpy_expression != "NaN":
            # The new formula is split on spaces so that each 'row[a]' can be replaced by 'a'
self.new_list = self.formula_1_numpy_expression.split(' ')
self.exp_as_func = eval('lambda row: ' + self.formula_1_numpy_expression)
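            # Note: eval() executes the translated formula string as Python code;
            # it is assumed here that formulas only come from trusted question authors.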
            # self.exp_as_func is not iterable, so it is wrapped in the single-element list "functions"
functions = [self.exp_as_func]
value_range_lower_upper_bounds(var_res_combined_min_entries_list, var_res_combined_max_entries_list, self.lower_list, self.upper_list)
            # Replace 'row[a]' -> 'a' and collect the distinct variable letters in a new list
for i in range(len(self.new_list)):
if "row" in self.new_list[i]:
if self.new_dict[self.new_list[i]] not in self.new_list2:
self.new_list2.append(self.new_dict[self.new_list[i]])
self.set_nr_of_var_index = sorted(self.new_list2)
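            # e.g. for " row['a'] *2+ row['c'] " this yields ['a', 'c']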
self.max_index_nr = self.list_index_dict[self.set_nr_of_var_index[-1]] + 1
            # Evaluate the formula. "linspace" creates "N" values between two bounds -> linspace(0,10,N) with N=11 --> 0,1,2,...,10
for p in range(len(self.set_nr_of_var_index)):
values.append(np.linspace(self.lower_list[self.list_index_dict[self.set_nr_of_var_index[p]]], self.upper_list[self.list_index_dict[self.set_nr_of_var_index[p]]], N))
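            # cartesian_product (presumably a helper defined elsewhere in this module)
            # combines the sampled points of every variable, so the frame below has
            # N**v rows for v variables.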
df = pd.DataFrame(cartesian_product(values), index=self.set_nr_of_var_index).T
if res_prec_entry.get() != "":
self.var_prec_highest_value = res_prec_entry.get()
else:
for i in range(len(var_prec_entries_list)):
self.var_prec_entry_list_values.append(var_prec_entries_list[i].get())
self.var_prec_highest_value = max(self.var_prec_entry_list_values)
#pd.options.display.float_format = '{:,.3f}'.format
for i, f in enumerate(functions):
df[f'f_{i + 1}'] = df.apply(f, axis=1)
df1 = df.apply(pd.to_numeric, errors='coerce')
print(df1)
print()
            print("Result computed!")
print(df1.apply(min_max).iloc[0]['f_1'])
print(df1.apply(min_max).iloc[1]['f_1'])
print("////////////////////////")
self.res_min_calc_value = df1.apply(min_max).iloc[0]['f_1']
self.res_max_calc_value = df1.apply(min_max).iloc[1]['f_1']
#"{:.2f}".format(a_float)
res_min_entry.delete(0, END)
res_min_entry.insert(END, str("{:.2f}".format(self.res_min_calc_value)))
res_max_entry.delete(0, END)
res_max_entry.insert(END, str(self.res_max_calc_value))
        # Check whether any formula still references a $r.. result
        # (note: the substring test also matches "$r1" inside "$r10")
        for i in range(len(self.res_formula_entry_list)):
            for j in range(1, 11):
                if "$r" + str(j) in str(self.res_formula_entry_list[i].get()):
                    print("$r" + str(j) + " found!", self.res_formula_entry_list[i].get())
                    if self.res_min_entry_list[j-1].get() != "" and self.res_max_entry_list[j-1].get() != "":
                        print("---", self.res_min_entry_list[j-1].get(), self.res_max_entry_list[j-1].get())
    # Compute the value range for questions while the question pool is being created
def ff_calculate_value_range_function_from_DB(self, entry_to_index_dict, record_id):
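        """Compute value ranges for one DB record (oid == record_id).

        entry_to_index_dict maps column names such as 'var1_min' or
        'res1_formula' to their index in the fetched DB row.
        """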
self.ff_db_entry_to_index_dict = entry_to_index_dict
        print("Reading values from the DB...")
        # Connect to the database
conn = sqlite3.connect(self.database_formelfrage_path)
cursor = conn.cursor()
cursor.execute("SELECT * FROM %s WHERE oid = %s " % (self.ff_database_table, str(record_id)))
ff_db_records = cursor.fetchall()
for ff_db_record in ff_db_records:
            # Read the variable bounds/precision (var1-var15) and the result
            # bounds/precision (res1-res10) from the DB record into attributes
            for n in range(1, 16):
                for field in ("min", "max", "prec"):
                    setattr(self, "ff_var%d_%s_from_DB" % (n, field),
                            ff_db_record[self.ff_db_entry_to_index_dict["var%d_%s" % (n, field)]])
            for n in range(1, 11):
                for field in ("min", "max", "prec"):
                    setattr(self, "ff_res%d_%s_from_DB" % (n, field),
                            ff_db_record[self.ff_db_entry_to_index_dict["res%d_%s" % (n, field)]])
self.var_res_combined_min_entries_from_DB_list = [self.ff_var1_min_from_DB, self.ff_var2_min_from_DB, self.ff_var3_min_from_DB, self.ff_var4_min_from_DB, self.ff_var5_min_from_DB,
self.ff_var6_min_from_DB, self.ff_var7_min_from_DB, self.ff_var8_min_from_DB, self.ff_var9_min_from_DB, self.ff_var10_min_from_DB,
self.ff_var11_min_from_DB, self.ff_var12_min_from_DB, self.ff_var13_min_from_DB, self.ff_var14_min_from_DB, self.ff_var15_min_from_DB,
self.ff_res1_min_from_DB, self.ff_res2_min_from_DB, self.ff_res3_min_from_DB, self.ff_res4_min_from_DB, self.ff_res5_min_from_DB,
self.ff_res6_min_from_DB, self.ff_res7_min_from_DB, self.ff_res8_min_from_DB, self.ff_res9_min_from_DB, self.ff_res10_min_from_DB]
self.var_res_combined_max_entries_from_DB_list = [self.ff_var1_max_from_DB, self.ff_var2_max_from_DB, self.ff_var3_max_from_DB, self.ff_var4_max_from_DB, self.ff_var5_max_from_DB,
self.ff_var6_max_from_DB, self.ff_var7_max_from_DB, self.ff_var8_max_from_DB, self.ff_var9_max_from_DB, self.ff_var10_max_from_DB,
self.ff_var11_max_from_DB, self.ff_var12_max_from_DB, self.ff_var13_max_from_DB, self.ff_var14_max_from_DB, self.ff_var15_max_from_DB,
self.ff_res1_max_from_DB, self.ff_res2_max_from_DB, self.ff_res3_max_from_DB, self.ff_res4_max_from_DB, self.ff_res5_max_from_DB,
self.ff_res6_max_from_DB, self.ff_res7_max_from_DB, self.ff_res8_max_from_DB, self.ff_res9_max_from_DB, self.ff_res10_max_from_DB]
self.var_prec_entry_from_DB_list = [self.ff_var1_prec_from_DB, self.ff_var2_prec_from_DB, self.ff_var3_prec_from_DB, self.ff_var4_prec_from_DB, self.ff_var5_prec_from_DB,
self.ff_var6_prec_from_DB, self.ff_var7_prec_from_DB, self.ff_var8_prec_from_DB, self.ff_var9_prec_from_DB, self.ff_var10_prec_from_DB,
self.ff_var11_prec_from_DB, self.ff_var12_prec_from_DB, self.ff_var13_prec_from_DB, self.ff_var14_prec_from_DB, self.ff_var15_prec_from_DB]
self.res_min_entry_from_DB_list = [self.ff_res1_min_from_DB, self.ff_res2_min_from_DB, self.ff_res3_min_from_DB, self.ff_res4_min_from_DB, self.ff_res5_min_from_DB,
self.ff_res6_min_from_DB, self.ff_res7_min_from_DB, self.ff_res8_min_from_DB, self.ff_res9_min_from_DB, self.ff_res10_min_from_DB]
self.res_max_entry_from_DB_list = [self.ff_res1_max_from_DB, self.ff_res2_max_from_DB, self.ff_res3_max_from_DB, self.ff_res4_max_from_DB, self.ff_res5_max_from_DB,
self.ff_res6_max_from_DB, self.ff_res7_max_from_DB, self.ff_res8_max_from_DB, self.ff_res9_max_from_DB, self.ff_res10_max_from_DB]
            print("Calling the formula calculation for formulas 1-10...")
            # Evaluate every result formula that is present in the DB record
            for n in range(1, 11):
                if ff_db_record[self.ff_db_entry_to_index_dict["res%d_formula" % n]] != "":
                    Formelfrage.ff_calculate_value_range_from_formula_in_DB(self, ff_db_record[self.ff_db_entry_to_index_dict["res%d_formula" % n]], self.var_res_combined_min_entries_from_DB_list, self.var_res_combined_max_entries_from_DB_list, self.var_prec_entry_from_DB_list, self.res_min_entry_from_DB_list[n-1], self.res_max_entry_from_DB_list[n-1], getattr(self, "ff_res%d_prec_from_DB" % n), self.res_min_entry_from_DB_list, self.res_max_entry_from_DB_list, record_id, n)
def ff_calculate_value_range_from_formula_in_DB(self, formula, var_res_combined_min_entries_list, var_res_combined_max_entries_list, var_prec_entries_list, res_min_entry, res_max_entry, res_prec_entry, res_min_entries_list, res_max_entries_list, record_id, res_number):
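        # DB variant of ff_calculate_value_range_from_formula_in_GUI: the bounds
        # arrive as plain strings from the DB record instead of tkinter Entry
        # widgets, and the computed range is written straight back to the database.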
def value_range_lower_upper_bounds(var_res_combined_min_entries_list, var_res_combined_max_entries_list, var_lower_bound_list, var_upper_bound_list):
            print("Computing lower/upper bounds...")
print(var_res_combined_min_entries_list, var_res_combined_max_entries_list, var_lower_bound_list, var_upper_bound_list)
for u in range(len(var_res_combined_min_entries_list)):
if var_res_combined_min_entries_list[u] != '' and var_res_combined_max_entries_list[u] != '' :
try:
var_lower_bound_list[u], var_upper_bound_list[u] = int(var_res_combined_min_entries_list[u]), int(var_res_combined_max_entries_list[u])
except ValueError:
var_lower_bound_list[u], var_upper_bound_list[u] = float(var_res_combined_min_entries_list[u]), float(var_res_combined_max_entries_list[u])
else:
var_lower_bound_list[u], var_upper_bound_list[u] = 0, 0
def min_max(col):
return pd.Series(index=['min', 'max'], data=[col.min(), col.max()])
        # Evaluate all formulas that contain NO $r (variables only)
self.var1_lower, self.var1_upper = 0, 0
self.var2_lower, self.var2_upper = 0, 0
self.var3_lower, self.var3_upper = 0, 0
self.var4_lower, self.var4_upper = 0, 0
self.var5_lower, self.var5_upper = 0, 0
self.var6_lower, self.var6_upper = 0, 0
self.var7_lower, self.var7_upper = 0, 0
self.var8_lower, self.var8_upper = 0, 0
self.var9_lower, self.var9_upper = 0, 0
self.var10_lower, self.var10_upper = 0, 0
self.var11_lower, self.var11_upper = 0, 0
self.var12_lower, self.var12_upper = 0, 0
self.var13_lower, self.var13_upper = 0, 0
self.var14_lower, self.var14_upper = 0, 0
self.var15_lower, self.var15_upper = 0, 0
self.res1_lower, self.res1_upper = 0, 0
self.res2_lower, self.res2_upper = 0, 0
self.res3_lower, self.res3_upper = 0, 0
self.res4_lower, self.res4_upper = 0, 0
self.res5_lower, self.res5_upper = 0, 0
self.res6_lower, self.res6_upper = 0, 0
self.res7_lower, self.res7_upper = 0, 0
self.res8_lower, self.res8_upper = 0, 0
self.res9_lower, self.res9_upper = 0, 0
self.res10_lower, self.res10_upper = 0, 0
self.new_list = []
self.new_list2 = []
self.set_nr_of_var_index = []
self.var_prec_entry_list_values = []
self.lower_list = [self.var1_lower, self.var2_lower, self.var3_lower, self.var4_lower, self.var5_lower,
self.var6_lower, self.var7_lower, self.var8_lower, self.var9_lower, self.var10_lower,
self.var11_lower, self.var12_lower, self.var13_lower, self.var14_lower, self.var15_lower,
self.res1_lower, self.res2_lower, self.res3_lower, self.res4_lower, self.res5_lower,
self.res6_lower, self.res7_lower, self.res8_lower, self.res9_lower, self.res10_lower]
self.upper_list = [self.var1_upper, self.var2_upper, self.var3_upper, self.var4_upper, self.var5_upper,
self.var6_upper, self.var7_upper, self.var8_upper, self.var9_upper, self.var10_upper,
self.var11_upper, self.var12_upper, self.var13_upper, self.var14_upper, self.var15_upper,
self.res1_upper, self.res2_upper, self.res3_upper, self.res4_upper, self.res5_upper,
self.res6_upper, self.res7_upper, self.res8_upper, self.res9_upper, self.res10_upper]
self.new_dict = {"row['a']": 'a',
"row['b']": 'b',
"row['c']": 'c',
"row['d']": 'd',
"row['e']": 'e',
"row['f']": 'f',
"row['g']": 'g',
"row['h']": 'h',
"row['i']": 'i',
"row['j']": 'j',
"row['k']": 'k',
"row['l']": 'l',
"row['m']": 'm',
"row['n']": 'n',
"row['o']": 'o',
"row['p']": 'p',
"row['q']": 'q',
"row['r']": 'r',
"row['s']": 's',
"row['t']": 't',
"row['u']": 'u',
"row['v']": 'v',
"row['w']": 'w',
"row['x']": 'x',
"row['y']": 'y' }
self.list_index_dict = {'a': 0,
'b': 1,
'c': 2,
'd': 3,
'e': 4,
'f': 5,
'g': 6,
'h': 7,
'i': 8,
'j': 9,
'k': 10,
'l': 11,
'm': 12,
'n': 13,
'o': 14,
'p': 15,
'q': 16,
'r': 17,
's': 18,
't': 19,
'u': 20,
'v': 21,
'w': 22,
'x': 23,
'y': 24,
}
values = []
# Number of values per range
N = 5
        # Translate the formula into numpy expressions: e.g. 2^5 -> 2**5, $v1*2+$v3 -> row[a] *2+ row[c]
self.formula_1_numpy_expression = Formelfrage.ff_calculate_value_range_replace_formula_numpy_DB(self, formula, var_res_combined_min_entries_list, var_res_combined_max_entries_list, res_min_entries_list, res_max_entries_list)
        if self.formula_1_numpy_expression is not None and self.formula_1_numpy_expression != "NaN":
            print("Translated numpy expression:")
            print(self.formula_1_numpy_expression)
            # The new formula is split on spaces so that each 'row[a]' can be replaced by 'a'
self.new_list = self.formula_1_numpy_expression.split(' ')
print("new_list")
print(self.new_list)
self.exp_as_func = eval('lambda row: ' + self.formula_1_numpy_expression)
            # self.exp_as_func is not iterable, so it is wrapped in the single-element list "functions"
functions = [self.exp_as_func]
value_range_lower_upper_bounds(var_res_combined_min_entries_list, var_res_combined_max_entries_list, self.lower_list, self.upper_list)
            # Replace 'row[a]' -> 'a' and collect the distinct variable letters in a new list
for i in range(len(self.new_list)):
if "row" in self.new_list[i]:
if self.new_dict[self.new_list[i]] not in self.new_list2:
self.new_list2.append(self.new_dict[self.new_list[i]])
self.set_nr_of_var_index = sorted(self.new_list2)
self.max_index_nr = self.list_index_dict[self.set_nr_of_var_index[-1]] + 1
# Evaluate the formula. "linspace" creates "N" values between two bounds -> linspace(0,10,N) with N=11 --> 0,1,2,3,4,5,6,7,8,9,10
for p in range(len(self.set_nr_of_var_index)):
values.append(np.linspace(self.lower_list[self.list_index_dict[self.set_nr_of_var_index[p]]], self.upper_list[self.list_index_dict[self.set_nr_of_var_index[p]]], N))
df = pd.DataFrame(cartesian_product(values), index=self.set_nr_of_var_index).T
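# cartesian_product is presumably a helper defined elsewhere that combines the
# per-variable linspace grids into every possible combination: for k variables the
# transposed frame has N**k rows and one column per variable letter (e.g. 'a', 'c').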
"""
if res_prec_entry != "":
self.var_prec_highest_value = res_prec_entry
else:
for i in range(len(var_prec_entries_list)):
self.var_prec_entry_list_values.append(int(var_prec_entries_list[i]))
self.var_prec_highest_value = max(self.var_prec_entry_list_values)
"""
#pd.options.display.float_format = '{:,.3f}'.format
for i, f in enumerate(functions):
df[f'f_{i + 1}'] = df.apply(f, axis=1)
df1 = df.apply(pd.to_numeric, errors='coerce')
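# errors='coerce' turns any non-numeric evaluation result into NaN, so the
# min/max aggregation below is not derailed by stray strings.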
#print(df1)
print()
print("Ergebnis berechnet aus DB!")
print(df1.apply(min_max).iloc[0]['f_1'])
print(df1.apply(min_max).iloc[1]['f_1'])
print("////////////////////////")
self.res_min_calc_value = df1.apply(min_max).iloc[0]['f_1']
self.res_max_calc_value = df1.apply(min_max).iloc[1]['f_1']
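# min_max is presumably a helper returning a (min, max) pair per column, so
# iloc[0]/iloc[1] of column 'f_1' are the lower and upper bound of the result.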
#"{:.2f}".format(a_float)
#res_min_entry.delete(0, END)
#res_min_entry.insert(END, str("{:.2f}".format(self.res_min_calc_value)))
#res_max_entry.delete(0, END)
#res_max_entry.insert(END, str(self.res_max_calc_value))
print("DAS IST DIE OID NUMMER: ", record_id)
# Verbindung mit der Datenbank
conn = sqlite3.connect(self.database_formelfrage_path)
c = conn.cursor()
#sql_update_query = "UPDATE " + self.ff_database_table + " SET res1_min=?, res1_max=? WHERE id=?",( res_min_entry, res_max_entry, record_id)
c.execute("UPDATE " + self.ff_database_table + " SET res" + str(res_number) + "_min=?, res" + str(res_number) + "_max=? WHERE oid=?",( self.res_min_calc_value, self.res_max_calc_value, record_id))
conn.commit()
conn.close()
# Check whether $r.. occurs in the formulas
for i in range(len(self.res_formula_entry_list)):
for j in range(1,10):
if "$r" + str(j) in str(self.res_formula_entry_list[i]):
print("$r" + str(j) + " found!", self.res_formula_entry_list[i])
if self.res_min_entry_list[j-1].get() != "" and self.res_max_entry_list[j-1].get() != "":
print("---", self.res_min_entry_list[j-1].get(), self.res_max_entry_list[j-1].get())
def ff_calculate_value_range_replace_formula_numpy_DB(self, formula, var_res_combined_min_entries_list, var_res_combined_max_entries_list, res_min_entries_list, res_max_entries_list):
self.formula = formula
self.formula_var_replaced = formula.replace('$', '_')
self.np_variables_translator_dict = {"pi": "np.pi",
",": ".",
"^": "**",
"e": "*10**",
"sin": "np.sin",
"cos": "np.cos",
"tan": "np.tan",
"arcsin": "np.arcsin",
"arccos": "np.arccos",
"arctan": "np.arctan",
"sinh": "np.sinh",
"cosh": "np.cosh",
"tanh": "np.tanh",
"arcsinh": "np.arcsinh",
"arccosh": "np.arccosh",
"arctanh": "np.arctanh",
"sqrt": "np.sqrt",
"abs": "np.abs",
"ln": "np.ln",
"log": "np.log",
"_v1": " row['a'] ",
"_v2": " row['b'] ",
"_v3": " row['c'] ",
"_v4": " row['d'] ",
"_v5": " row['e'] ",
"_v6": " row['f'] ",
"_v7": " row['g'] ",
"_v8": " row['h'] ",
"_v9": " row['i'] ",
"_v10": " row['j'] ",
"_v11": " row['k'] ",
"_v12": " row['l'] ",
"_v13": " row['m'] ",
"_v14": " row['n'] ",
"_v15": " row['o'] "}
self.np_results_translator_dict = {
"_r1": " row['p'] ",
"_r2": " row['q'] ",
"_r3": " row['r'] ",
"_r4": " row['s'] ",
"_r5": " row['t'] ",
"_r6": " row['u'] ",
"_r7": " row['v'] ",
"_r8": " row['w'] ",
"_r9": " row['x'] ",
"_r10": " row['y'] "}
print("----------------------")
print("Übernehme Formel aus Eingabefeld")
print("---> ", self.formula)
print("Prüfe auf Grenzen")
def replace_var(match):
return self.np_variables_translator_dict[match.group(0)]
def replace_res(match):
return self.np_results_translator_dict[match.group(0)]
print("====OUTPUT===")
self.formula_var_replaced = re.sub('|'.join(r'\b%s\b' % re.escape(s) for s in self.np_variables_translator_dict),replace_var, self.formula_var_replaced)
print(self.formula_var_replaced)
#for key in self.np_variables_translator_dict.keys():
# self.formula_var_replaced = self.formula_var_replaced.replace(key, self.np_variables_translator_dict[key])
self.formula_res_replaced = re.sub('|'.join(r'\b%s\b' % re.escape(s) for s in self.np_results_translator_dict),replace_res, self.formula_var_replaced)
print("FORMULA REPLACED")
print(self.formula_res_replaced)
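# Worked example (sketch) of the two substitution passes for "$v1^2 + sin($v2)":
#   '$' -> '_'      : "_v1^2 + sin(_v2)"
#   variable pass   : " row['a'] **2 + np.sin( row['b'] )"
# The \b word boundaries keep 'sin' from matching inside 'arcsin' and '_v1' from
# matching inside '_v10'; result symbols ($r1 -> row['p'], ...) are handled by the
# second pass.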
#for key in self.np_results_translator_dict.keys():
# self.formula_res_replaced = self.formula_res_replaced.replace(key, self.np_results_translator_dict[key])
for i in range(len(var_res_combined_min_entries_list)):
if "$v" + (str(i+1)) in formula and var_res_combined_min_entries_list[i] != "" and var_res_combined_max_entries_list[i] != "":
self.formula = self.formula_var_replaced
for j in range(len(res_min_entries_list)):
if "$r" + (str(j+1)) in formula:
if res_min_entries_list[j] != "" and res_max_entries_list[j] != "":
print("Grenzen verfügbar! --> Ersetze alle Symbole mit numpy-symoblik")
self.formula = self.formula_res_replaced
else:
self.formula = "NaN"
if "$r" + (str(i+1)) in formula and var_res_combined_min_entries_list[i] != "" and var_res_combined_max_entries_list[i] != "":
self.formula = self.formula_res_replaced
print("retun formula", self.formula)
return self.formula
############# DATABASE FUNCTIONS
def ff_save_id_to_db(self):
conn = sqlite3.connect(self.database_formelfrage_path)
c = conn.cursor()
# format of duration P0Y0M0DT0H30M0S
self.ff_test_time = "P0Y0M0DT" + self.ff_proc_hours_box.get() + "H" + self.ff_proc_minutes_box.get() + "M" + self.ff_proc_seconds_box.get() + "S"
# Image 1
if self.ff_description_img_name_1 != "" and self.ff_description_img_name_1 != "EMPTY":
# read image data in byte format
with open(os.path.join(self.project_root_path, self.ff_description_img_path_1), 'rb') as image_file_1:
self.ff_description_img_data_1 = image_file_1.read()
else:
self.ff_description_img_name_1 = ""
self.ff_description_img_path_1 = ""
self.ff_description_img_data_1 = ""
# Image 2
if self.ff_description_img_name_2 != "" and self.ff_description_img_name_2 != "EMPTY":
# read image data in byte format
with open(self.ff_description_img_path_2, 'rb') as image_file_2:
self.ff_description_img_data_2 = image_file_2.read()
else:
self.ff_description_img_name_2 = ""
self.ff_description_img_path_2 = ""
self.ff_description_img_data_2 = ""
# Image 3
if self.ff_description_img_name_3 != "" and self.ff_description_img_name_3 != "EMPTY":
# read image data in byte format
with open(self.ff_description_img_path_3, 'rb') as image_file_3:
self.ff_description_img_data_3 = image_file_3.read()
else:
self.ff_description_img_name_3 = ""
self.ff_description_img_path_3 = ""
self.ff_description_img_data_3 = ""
# Insert into Table
c.execute(
"INSERT INTO " + self.ff_database_table + " VALUES (" + self.db_column_names_string + ")",
{
'question_difficulty': self.ff_question_difficulty_entry.get(),
'question_category': self.ff_question_category_entry.get(),
'question_type': self.ff_question_type_entry.get(),
'question_title': self.ff_question_title_entry.get(),
'question_description_title': self.ff_question_description_title_entry.get(),
# The first part, "1.0", means that the input is read from line one, character zero (i.e. the very first character).
# END is an imported constant set to the string "end"; it means: read until the end of the text box.
# The only issue is that this adds a trailing newline to the input,
# so END is changed to 'end-1c' (thanks <NAME>): -1c deletes one character, -2c would delete two, and so on.
'question_description_main': self.ff_question_description_main_entry.get("1.0", 'end-1c'),
'res1_formula': self.res1_formula_entry.get(),
'res2_formula': self.res2_formula_entry.get(),
'res3_formula': self.res3_formula_entry.get(),
'res4_formula': self.res4_formula_entry.get(),
'res5_formula': self.res5_formula_entry.get(),
'res6_formula': self.res6_formula_entry.get(),
'res7_formula': self.res7_formula_entry.get(),
'res8_formula': self.res8_formula_entry.get(),
'res9_formula': self.res9_formula_entry.get(),
'res10_formula': self.res10_formula_entry.get(),
'var1_name': self.var1_name_entry.get(),
'var1_min': self.var1_min_entry.get(),
'var1_max': self.var1_max_entry.get(),
'var1_prec': self.var1_prec_entry.get(),
'var1_divby': self.var1_divby_entry.get(),
'var1_unit': "",
'var2_name': self.var2_name_entry.get(),
'var2_min': self.var2_min_entry.get(),
'var2_max': self.var2_max_entry.get(),
'var2_prec': self.var2_prec_entry.get(),
'var2_divby': self.var2_divby_entry.get(),
'var2_unit': "",
'var3_name': self.var3_name_entry.get(),
'var3_min': self.var3_min_entry.get(),
'var3_max': self.var3_max_entry.get(),
'var3_prec': self.var3_prec_entry.get(),
'var3_divby': self.var3_divby_entry.get(),
'var3_unit': "",
'var4_name': self.var4_name_entry.get(),
'var4_min': self.var4_min_entry.get(),
'var4_max': self.var4_max_entry.get(),
'var4_prec': self.var4_prec_entry.get(),
'var4_divby': self.var4_divby_entry.get(),
'var4_unit': "",
'var5_name': self.var5_name_entry.get(),
'var5_min': self.var5_min_entry.get(),
'var5_max': self.var5_max_entry.get(),
'var5_prec': self.var5_prec_entry.get(),
'var5_divby': self.var5_divby_entry.get(),
'var5_unit': "",
'var6_name': self.var6_name_entry.get(),
'var6_min': self.var6_min_entry.get(),
'var6_max': self.var6_max_entry.get(),
'var6_prec': self.var6_prec_entry.get(),
'var6_divby': self.var6_divby_entry.get(),
'var6_unit': "",
'var7_name': self.var7_name_entry.get(),
'var7_min': self.var7_min_entry.get(),
'var7_max': self.var7_max_entry.get(),
'var7_prec': self.var7_prec_entry.get(),
'var7_divby': self.var7_divby_entry.get(),
'var7_unit': "",
'var8_name': self.var8_name_entry.get(),
'var8_min': self.var8_min_entry.get(),
'var8_max': self.var8_max_entry.get(),
'var8_prec': self.var8_prec_entry.get(),
'var8_divby': self.var8_divby_entry.get(),
'var8_unit': "",
'var9_name': self.var9_name_entry.get(),
'var9_min': self.var9_min_entry.get(),
'var9_max': self.var9_max_entry.get(),
'var9_prec': self.var9_prec_entry.get(),
'var9_divby': self.var9_divby_entry.get(),
'var9_unit': "",
'var10_name': self.var10_name_entry.get(),
'var10_min': self.var10_min_entry.get(),
'var10_max': self.var10_max_entry.get(),
'var10_prec': self.var10_prec_entry.get(),
'var10_divby': self.var10_divby_entry.get(),
'var10_unit': "",
'var11_name': self.var11_name_entry.get(),
'var11_min': self.var11_min_entry.get(),
'var11_max': self.var11_max_entry.get(),
'var11_prec': self.var11_prec_entry.get(),
'var11_divby': self.var11_divby_entry.get(),
'var11_unit': "",
'var12_name': self.var12_name_entry.get(),
'var12_min': self.var12_min_entry.get(),
'var12_max': self.var12_max_entry.get(),
'var12_prec': self.var12_prec_entry.get(),
'var12_divby': self.var12_divby_entry.get(),
'var12_unit': "",
'var13_name': self.var13_name_entry.get(),
'var13_min': self.var13_min_entry.get(),
'var13_max': self.var13_max_entry.get(),
'var13_prec': self.var13_prec_entry.get(),
'var13_divby': self.var13_divby_entry.get(),
'var13_unit': "",
'var14_name': self.var14_name_entry.get(),
'var14_min': self.var14_min_entry.get(),
'var14_max': self.var14_max_entry.get(),
'var14_prec': self.var14_prec_entry.get(),
'var14_divby': self.var14_divby_entry.get(),
'var14_unit': "",
'var15_name': self.var15_name_entry.get(),
'var15_min': self.var15_min_entry.get(),
'var15_max': self.var15_max_entry.get(),
'var15_prec': self.var15_prec_entry.get(),
'var15_divby': self.var15_divby_entry.get(),
'var15_unit': "",
'res1_name': self.res1_name_entry.get(),
'res1_min': self.res1_min_entry.get(),
'res1_max': self.res1_max_entry.get(),
'res1_prec': self.res1_prec_entry.get(),
'res1_tol': self.res1_tol_entry.get(),
'res1_points': self.res1_points_entry.get(),
'res1_unit': "",
'res2_name': self.res2_name_entry.get(),
'res2_min': self.res2_min_entry.get(),
'res2_max': self.res2_max_entry.get(),
'res2_prec': self.res2_prec_entry.get(),
'res2_tol': self.res2_tol_entry.get(),
'res2_points': self.res2_points_entry.get(),
'res2_unit': "",
'res3_name': self.res3_name_entry.get(),
'res3_min': self.res3_min_entry.get(),
'res3_max': self.res3_max_entry.get(),
'res3_prec': self.res3_prec_entry.get(),
'res3_tol': self.res3_tol_entry.get(),
'res3_points': self.res3_points_entry.get(),
'res3_unit': "",
'res4_name': self.res4_name_entry.get(),
'res4_min': self.res4_min_entry.get(),
'res4_max': self.res4_max_entry.get(),
'res4_prec': self.res4_prec_entry.get(),
'res4_tol': self.res4_tol_entry.get(),
'res4_points': self.res4_points_entry.get(),
'res4_unit': "",
'res5_name': self.res5_name_entry.get(),
'res5_min': self.res5_min_entry.get(),
'res5_max': self.res5_max_entry.get(),
'res5_prec': self.res5_prec_entry.get(),
'res5_tol': self.res5_tol_entry.get(),
'res5_points': self.res5_points_entry.get(),
'res5_unit': "",
'res6_name': self.res6_name_entry.get(),
'res6_min': self.res6_min_entry.get(),
'res6_max': self.res6_max_entry.get(),
'res6_prec': self.res6_prec_entry.get(),
'res6_tol': self.res6_tol_entry.get(),
'res6_points': self.res6_points_entry.get(),
'res6_unit': "",
'res7_name': self.res7_name_entry.get(),
'res7_min': self.res7_min_entry.get(),
'res7_max': self.res7_max_entry.get(),
'res7_prec': self.res7_prec_entry.get(),
'res7_tol': self.res7_tol_entry.get(),
'res7_points': self.res7_points_entry.get(),
'res7_unit': "",
'res8_name': self.res8_name_entry.get(),
'res8_min': self.res8_min_entry.get(),
'res8_max': self.res8_max_entry.get(),
'res8_prec': self.res8_prec_entry.get(),
'res8_tol': self.res8_tol_entry.get(),
'res8_points': self.res8_points_entry.get(),
'res8_unit': "",
'res9_name': self.res9_name_entry.get(),
'res9_min': self.res9_min_entry.get(),
'res9_max': self.res9_max_entry.get(),
'res9_prec': self.res9_prec_entry.get(),
'res9_tol': self.res9_tol_entry.get(),
'res9_points': self.res9_points_entry.get(),
'res9_unit': "",
'res10_name': self.res10_name_entry.get(),
'res10_min': self.res10_min_entry.get(),
'res10_max': self.res10_max_entry.get(),
'res10_prec': self.res10_prec_entry.get(),
'res10_tol': self.res10_tol_entry.get(),
'res10_points': self.res10_points_entry.get(),
'res10_unit': "",
'description_img_name_1': self.ff_description_img_name_1,
'description_img_data_1': self.ff_description_img_data_1,
'description_img_path_1': self.ff_description_img_path_1,
'description_img_name_2': self.ff_description_img_name_2,
'description_img_data_2': self.ff_description_img_data_2,
'description_img_path_2': self.ff_description_img_path_2,
'description_img_name_3': self.ff_description_img_name_3,
'description_img_data_3': self.ff_description_img_data_3,
'description_img_path_3': self.ff_description_img_path_3,
'test_time': self.ff_test_time,
'var_number': self.ff_numbers_of_answers_box.get(),
'res_number': self.ff_numbers_of_results_box.get(),
'question_pool_tag': self.ff_question_pool_tag_entry.get(),
'question_author': self.ff_question_author_entry.get()
})
conn.commit()
conn.close()
print("Neuer Eintrag in die Formelfrage-Datenbank --> Fragentitel: " + str(self.ff_question_title_entry.get()))
def ff_load_id_from_db(self, entry_to_index_dict):
self.ff_db_entry_to_index_dict = entry_to_index_dict
conn = sqlite3.connect(self.database_formelfrage_path)
c = conn.cursor()
record_id = self.ff_load_box.get()
c.execute("SELECT * FROM %s WHERE oid = %s " % (self.ff_database_table, str(record_id)))
ff_db_records = c.fetchall()
Formelfrage.ff_clear_GUI(self)
for ff_db_record in ff_db_records:
self.ff_question_difficulty_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['question_difficulty']] )
self.ff_question_category_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['question_category']])
self.ff_question_type_entry.delete(0, END)
self.ff_question_type_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['question_type']])
self.ff_question_title_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['question_title']])
self.ff_question_description_title_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['question_description_title']])
self.ff_question_description_main_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['question_description_main']])
self.res1_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res1_formula']])
self.res2_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res2_formula']])
self.res3_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res3_formula']])
self.res4_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res4_formula']])
self.res5_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res5_formula']])
self.res6_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res6_formula']])
self.res7_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res7_formula']])
self.res8_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res8_formula']])
self.res9_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res9_formula']])
self.res10_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res10_formula']])
self.var1_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var1_name']])
self.var1_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var1_min']])
self.var1_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var1_max']])
self.var1_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var1_prec']])
self.var1_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var1_divby']])
self.var2_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var2_name']])
self.var2_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var2_min']])
self.var2_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var2_max']])
self.var2_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var2_prec']])
self.var2_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var2_divby']])
self.var3_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var3_name']])
self.var3_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var3_min']])
self.var3_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var3_max']])
self.var3_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var3_prec']])
self.var3_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var3_divby']])
self.var4_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var4_name']])
self.var4_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var4_min']])
self.var4_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var4_max']])
self.var4_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var4_prec']])
self.var4_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var4_divby']])
self.var5_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var5_name']])
self.var5_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var5_min']])
self.var5_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var5_max']])
self.var5_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var5_prec']])
self.var5_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var5_divby']])
self.var6_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var6_name']])
self.var6_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var6_min']])
self.var6_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var6_max']])
self.var6_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var6_prec']])
self.var6_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var6_divby']])
self.var7_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var7_name']])
self.var7_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var7_min']])
self.var7_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var7_max']])
self.var7_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var7_prec']])
self.var7_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var7_divby']])
self.var8_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var8_name']])
self.var8_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var8_min']])
self.var8_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var8_max']])
self.var8_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var8_prec']])
self.var8_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var8_divby']])
self.var9_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var9_name']])
self.var9_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var9_min']])
self.var9_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var9_max']])
self.var9_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var9_prec']])
self.var9_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var9_divby']])
self.var10_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var10_name']])
self.var10_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var10_min']])
self.var10_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var10_max']])
self.var10_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var10_prec']])
self.var10_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var10_divby']])
self.var11_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var11_name']])
self.var11_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var11_min']])
self.var11_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var11_max']])
self.var11_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var11_prec']])
self.var11_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var11_divby']])
self.var12_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var12_name']])
self.var12_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var12_min']])
self.var12_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var12_max']])
self.var12_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var12_prec']])
self.var12_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var12_divby']])
self.var13_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var13_name']])
self.var13_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var13_min']])
self.var13_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var13_max']])
self.var13_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var13_prec']])
self.var13_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var13_divby']])
self.var14_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var14_name']])
self.var14_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var14_min']])
self.var14_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var14_max']])
self.var14_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var14_prec']])
self.var14_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var14_divby']])
self.var15_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var15_name']])
self.var15_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var15_min']])
self.var15_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var15_max']])
self.var15_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var15_prec']])
self.var15_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var15_divby']])
self.res1_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res1_name']])
self.res1_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res1_min']])
self.res1_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res1_max']])
self.res1_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res1_prec']])
self.res1_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res1_tol']])
self.res1_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res1_points']])
self.res2_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res2_name']])
self.res2_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res2_min']])
self.res2_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res2_max']])
self.res2_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res2_prec']])
self.res2_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res2_tol']])
self.res2_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res2_points']])
self.res3_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res3_name']])
self.res3_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res3_min']])
self.res3_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res3_max']])
self.res3_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res3_prec']])
self.res3_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res3_tol']])
self.res3_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res3_points']])
self.res4_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res4_name']])
self.res4_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res4_min']])
self.res4_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res4_max']])
self.res4_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res4_prec']])
self.res4_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res4_tol']])
self.res4_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res4_points']])
self.res5_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res5_name']])
self.res5_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res5_min']])
self.res5_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res5_max']])
self.res5_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res5_prec']])
self.res5_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res5_tol']])
self.res5_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res5_points']])
self.res6_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res6_name']])
self.res6_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res6_min']])
self.res6_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res6_max']])
self.res6_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res6_prec']])
self.res6_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res6_tol']])
self.res6_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res6_points']])
self.res7_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res7_name']])
self.res7_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res7_min']])
self.res7_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res7_max']])
self.res7_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res7_prec']])
self.res7_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res7_tol']])
self.res7_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res7_points']])
self.res8_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res8_name']])
self.res8_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res8_min']])
self.res8_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res8_max']])
self.res8_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res8_prec']])
self.res8_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res8_tol']])
self.res8_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res8_points']])
self.res9_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res9_name']])
self.res9_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res9_min']])
self.res9_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res9_max']])
self.res9_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res9_prec']])
self.res9_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res9_tol']])
self.res9_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res9_points']])
self.res10_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res10_name']])
self.res10_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res10_min']])
self.res10_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res10_max']])
self.res10_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res10_prec']])
self.res10_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res10_tol']])
self.res10_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res10_points']])
self.ff_description_img_name_1 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_name_1']]
self.ff_description_img_data_1 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_data_1']]
self.ff_description_img_path_1 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_path_1']]
self.ff_description_img_name_2 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_name_2']]
self.ff_description_img_data_2 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_data_2']]
self.ff_description_img_path_2 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_path_2']]
self.ff_description_img_name_3 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_name_3']]
self.ff_description_img_data_3 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_data_3']]
self.ff_description_img_path_3 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_path_3']]
conn.commit()
conn.close()
if self.ff_var_highlight_question_text.get() == 1:
print("Frage wird MIT Text-Formatierung geladen. --> Fragen-ID: " + str(self.ff_load_box.get()))
test_generator_modul_taxonomie_und_textformatierung.Textformatierung.reallocate_text(self, self.ff_question_description_main_entry)
else:
print("Frage wird OHNE Text-Formatierung geladen. --> Fragen-ID: " + str(self.ff_load_box.get()))
def ff_edit_id_from_db(self):
# Connect to the database
conn = sqlite3.connect(self.database_formelfrage_path)
c = conn.cursor()
# Read the question ID from the "Load ID" entry field
record_id = self.ff_load_box.get()
# Duration format in the XML file: P0Y0M0DT0H30M0S
self.ff_test_time = "P0Y0M0DT" + self.ff_proc_hours_box.get() + "H" + self.ff_proc_minutes_box.get() + "M" + self.ff_proc_seconds_box.get() + "S"
# If an image name is present, read the image via its path,
# otherwise set the fields to ""
# Images are read as bytes ("rb" = read byte)
# Question text image 1
if self.ff_description_img_name_1 != "" and self.ff_description_img_name_1 != "EMPTY":
with open(os.path.join(self.project_root_path, self.ff_description_img_path_1), 'rb') as description_image_file_1:
self.ff_description_img_data_1 = description_image_file_1.read()
else:
self.ff_description_img_name_1 = ""
self.ff_description_img_data_1 = ""
self.ff_description_img_path_1 = ""
# Question text image 2
if self.ff_description_img_name_2 != "" and self.ff_description_img_name_2 != "EMPTY":
with open( self.ff_description_img_path_2, 'rb') as description_image_file_2:
self.ff_description_img_data_2 = description_image_file_2.read()
else:
self.ff_description_img_name_2 = ""
self.ff_description_img_data_2 = ""
self.ff_description_img_path_2 = ""
# Question text image 3
if self.ff_description_img_name_3 != "" and self.ff_description_img_name_3 != "EMPTY":
with open( self.ff_description_img_path_3, 'rb') as description_image_file_3:
self.ff_description_img_data_3 = description_image_file_3.read()
else:
self.ff_description_img_name_3 = ""
self.ff_description_img_data_3 = ""
self.ff_description_img_path_3 = ""
print("############# EDIT Function ########")
self.edit_list = []
for i in range(len(self.ff_db_column_names_list)):
self.edit_list.append(self.ff_db_column_names_list[i] + " = :" + self.ff_db_column_names_list[i])
self.db_column_names_string_for_edit = ','.join(self.edit_list)
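# db_column_names_string_for_edit now looks like
# "question_difficulty = :question_difficulty,question_category = :question_category,..."
# so the UPDATE below binds every column to its named placeholder.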
c.execute("UPDATE " + self.ff_database_table + " SET " + self.db_column_names_string_for_edit + " WHERE oid = :oid",
{'question_difficulty': self.ff_question_difficulty_entry.get(),
'question_category': self.ff_question_category_entry.get(),
'question_type': self.ff_question_type_entry.get(),
'question_title': self.ff_question_title_entry.get(),
'question_description_title': self.ff_question_description_title_entry.get(),
'question_description_main': self.ff_question_description_main_entry.get("1.0", 'end-1c'),
'res1_formula': self.res1_formula_entry.get(),
'res2_formula': self.res2_formula_entry.get(),
'res3_formula': self.res3_formula_entry.get(),
'res4_formula': self.res4_formula_entry.get(),
'res5_formula': self.res5_formula_entry.get(),
'res6_formula': self.res6_formula_entry.get(),
'res7_formula': self.res7_formula_entry.get(),
'res8_formula': self.res8_formula_entry.get(),
'res9_formula': self.res9_formula_entry.get(),
'res10_formula': self.res10_formula_entry.get(),
'var1_name': self.var1_name_entry.get(),
'var1_min': self.var1_min_entry.get(),
'var1_max': self.var1_max_entry.get(),
'var1_prec': self.var1_prec_entry.get(),
'var1_divby': self.var1_divby_entry.get(),
'var1_unit': "",
'var2_name': self.var2_name_entry.get(),
'var2_min': self.var2_min_entry.get(),
'var2_max': self.var2_max_entry.get(),
'var2_prec': self.var2_prec_entry.get(),
'var2_divby': self.var2_divby_entry.get(),
'var2_unit': "",
'var3_name': self.var3_name_entry.get(),
'var3_min': self.var3_min_entry.get(),
'var3_max': self.var3_max_entry.get(),
'var3_prec': self.var3_prec_entry.get(),
'var3_divby': self.var3_divby_entry.get(),
'var3_unit': "",
'var4_name': self.var4_name_entry.get(),
'var4_min': self.var4_min_entry.get(),
'var4_max': self.var4_max_entry.get(),
'var4_prec': self.var4_prec_entry.get(),
'var4_divby': self.var4_divby_entry.get(),
'var4_unit': "",
'var5_name': self.var5_name_entry.get(),
'var5_min': self.var5_min_entry.get(),
'var5_max': self.var5_max_entry.get(),
'var5_prec': self.var5_prec_entry.get(),
'var5_divby': self.var5_divby_entry.get(),
'var5_unit': "",
'var6_name': self.var6_name_entry.get(),
'var6_min': self.var6_min_entry.get(),
'var6_max': self.var6_max_entry.get(),
'var6_prec': self.var6_prec_entry.get(),
'var6_divby': self.var6_divby_entry.get(),
'var6_unit': "",
'var7_name': self.var7_name_entry.get(),
'var7_min': self.var7_min_entry.get(),
'var7_max': self.var7_max_entry.get(),
'var7_prec': self.var7_prec_entry.get(),
'var7_divby': self.var7_divby_entry.get(),
'var7_unit': "",
'var8_name': self.var8_name_entry.get(),
'var8_min': self.var8_min_entry.get(),
'var8_max': self.var8_max_entry.get(),
'var8_prec': self.var8_prec_entry.get(),
'var8_divby': self.var8_divby_entry.get(),
'var8_unit': "",
'var9_name': self.var9_name_entry.get(),
'var9_min': self.var9_min_entry.get(),
'var9_max': self.var9_max_entry.get(),
'var9_prec': self.var9_prec_entry.get(),
'var9_divby': self.var9_divby_entry.get(),
'var9_unit': "",
'var10_name': self.var10_name_entry.get(),
'var10_min': self.var10_min_entry.get(),
'var10_max': self.var10_max_entry.get(),
'var10_prec': self.var10_prec_entry.get(),
'var10_divby': self.var10_divby_entry.get(),
'var10_unit': "",
'var11_name': self.var11_name_entry.get(),
'var11_min': self.var11_min_entry.get(),
'var11_max': self.var11_max_entry.get(),
'var11_prec': self.var11_prec_entry.get(),
'var11_divby': self.var11_divby_entry.get(),
'var11_unit': "",
'var12_name': self.var12_name_entry.get(),
'var12_min': self.var12_min_entry.get(),
'var12_max': self.var12_max_entry.get(),
'var12_prec': self.var12_prec_entry.get(),
'var12_divby': self.var12_divby_entry.get(),
'var12_unit': "",
'var13_name': self.var13_name_entry.get(),
'var13_min': self.var13_min_entry.get(),
'var13_max': self.var13_max_entry.get(),
'var13_prec': self.var13_prec_entry.get(),
'var13_divby': self.var13_divby_entry.get(),
'var13_unit': "",
'var14_name': self.var14_name_entry.get(),
'var14_min': self.var14_min_entry.get(),
'var14_max': self.var14_max_entry.get(),
'var14_prec': self.var14_prec_entry.get(),
'var14_divby': self.var14_divby_entry.get(),
'var14_unit': "",
'var15_name': self.var15_name_entry.get(),
'var15_min': self.var15_min_entry.get(),
'var15_max': self.var15_max_entry.get(),
'var15_prec': self.var15_prec_entry.get(),
'var15_divby': self.var15_divby_entry.get(),
'var15_unit': "",
'res1_name': self.res1_name_entry.get(),
'res1_min': self.res1_min_entry.get(),
'res1_max': self.res1_max_entry.get(),
'res1_prec': self.res1_prec_entry.get(),
'res1_tol': self.res1_tol_entry.get(),
'res1_points': self.res1_points_entry.get(),
'res1_unit': "",
'res2_name': self.res2_name_entry.get(),
'res2_min': self.res2_min_entry.get(),
'res2_max': self.res2_max_entry.get(),
'res2_prec': self.res2_prec_entry.get(),
'res2_tol': self.res2_tol_entry.get(),
'res2_points': self.res2_points_entry.get(),
'res2_unit': "",
'res3_name': self.res3_name_entry.get(),
'res3_min': self.res3_min_entry.get(),
'res3_max': self.res3_max_entry.get(),
'res3_prec': self.res3_prec_entry.get(),
'res3_tol': self.res3_tol_entry.get(),
'res3_points': self.res3_points_entry.get(),
'res3_unit': "",
'res4_name': self.res4_name_entry.get(),
'res4_min': self.res4_min_entry.get(),
'res4_max': self.res4_max_entry.get(),
'res4_prec': self.res4_prec_entry.get(),
'res4_tol': self.res4_tol_entry.get(),
'res4_points': self.res4_points_entry.get(),
'res4_unit': "",
'res5_name': self.res5_name_entry.get(),
'res5_min': self.res5_min_entry.get(),
'res5_max': self.res5_max_entry.get(),
'res5_prec': self.res5_prec_entry.get(),
'res5_tol': self.res5_tol_entry.get(),
'res5_points': self.res5_points_entry.get(),
'res5_unit': "",
'res6_name': self.res6_name_entry.get(),
'res6_min': self.res6_min_entry.get(),
'res6_max': self.res6_max_entry.get(),
'res6_prec': self.res6_prec_entry.get(),
'res6_tol': self.res6_tol_entry.get(),
'res6_points': self.res6_points_entry.get(),
'res6_unit': "",
'res7_name': self.res7_name_entry.get(),
'res7_min': self.res7_min_entry.get(),
'res7_max': self.res7_max_entry.get(),
'res7_prec': self.res7_prec_entry.get(),
'res7_tol': self.res7_tol_entry.get(),
'res7_points': self.res7_points_entry.get(),
'res7_unit': "",
'res8_name': self.res8_name_entry.get(),
'res8_min': self.res8_min_entry.get(),
'res8_max': self.res8_max_entry.get(),
'res8_prec': self.res8_prec_entry.get(),
'res8_tol': self.res8_tol_entry.get(),
'res8_points': self.res8_points_entry.get(),
'res8_unit': "",
'res9_name': self.res9_name_entry.get(),
'res9_min': self.res9_min_entry.get(),
'res9_max': self.res9_max_entry.get(),
'res9_prec': self.res9_prec_entry.get(),
'res9_tol': self.res9_tol_entry.get(),
'res9_points': self.res9_points_entry.get(),
'res9_unit': "",
'res10_name': self.res10_name_entry.get(),
'res10_min': self.res10_min_entry.get(),
'res10_max': self.res10_max_entry.get(),
'res10_prec': self.res10_prec_entry.get(),
'res10_tol': self.res10_tol_entry.get(),
'res10_points': self.res10_points_entry.get(),
'res10_unit': "",
'description_img_name_1': self.ff_description_img_name_1,
'description_img_data_1': self.ff_description_img_data_1,
'description_img_path_1': self.ff_description_img_path_1,
'description_img_name_2': self.ff_description_img_name_2,
'description_img_data_2': self.ff_description_img_data_2,
'description_img_path_2': self.ff_description_img_path_2,
'description_img_name_3': self.ff_description_img_name_3,
'description_img_data_3': self.ff_description_img_data_3,
'description_img_path_3': self.ff_description_img_path_3,
'test_time': self.ff_test_time,
'var_number': "",
'res_number': "",
'question_pool_tag': self.ff_question_pool_tag_entry.get(),
'question_author': self.ff_question_author_entry.get(),
'oid': record_id
})
conn.commit()
conn.close()
print("Frage mit ID: '" + record_id + "' editiert")
def ff_delete_id_from_db(self):
self.ff_delete_box_id = ""
self.ff_delete_box_id = self.ff_delete_box.get()
test_generator_modul_datenbanken_erstellen.Delete_Entry_from_Database.__init__(self, self.ff_delete_box_id, self.ff_question_type_name, self.ff_var_delete_all.get(), self.project_root_path, self.ff_db_entry_to_index_dict, self.database_formelfrage_path, self.ff_database, self.ff_database_table, "Formelfrage_DB_export_file.xlsx", "Formelfrage - Database")
def ff_clear_GUI(self):
self.ff_question_difficulty_entry.delete(0, END)
self.ff_question_category_entry.delete(0, END)
#self.ff_question_type_entry.delete(0, END)
self.ff_question_title_entry.delete(0, END)
self.ff_question_description_title_entry.delete(0, END)
self.ff_question_description_main_entry.delete('1.0', 'end-1c')
self.res1_formula_entry.delete(0, END)
self.res2_formula_entry.delete(0, END)
self.res3_formula_entry.delete(0, END)
self.res4_formula_entry.delete(0, END)
self.res5_formula_entry.delete(0, END)
self.res6_formula_entry.delete(0, END)
self.res7_formula_entry.delete(0, END)
self.res8_formula_entry.delete(0, END)
self.res9_formula_entry.delete(0, END)
self.res10_formula_entry.delete(0, END)
self.var1_name_entry.delete(0, END)
self.var1_min_entry.delete(0, END)
self.var1_max_entry.delete(0, END)
self.var1_prec_entry.delete(0, END)
self.var1_divby_entry.delete(0, END)
self.var2_name_entry.delete(0, END)
self.var2_min_entry.delete(0, END)
self.var2_max_entry.delete(0, END)
self.var2_prec_entry.delete(0, END)
self.var2_divby_entry.delete(0, END)
self.var3_name_entry.delete(0, END)
self.var3_min_entry.delete(0, END)
self.var3_max_entry.delete(0, END)
self.var3_prec_entry.delete(0, END)
self.var3_divby_entry.delete(0, END)
self.var4_name_entry.delete(0, END)
self.var4_min_entry.delete(0, END)
self.var4_max_entry.delete(0, END)
self.var4_prec_entry.delete(0, END)
self.var4_divby_entry.delete(0, END)
self.var5_name_entry.delete(0, END)
self.var5_min_entry.delete(0, END)
self.var5_max_entry.delete(0, END)
self.var5_prec_entry.delete(0, END)
self.var5_divby_entry.delete(0, END)
self.var6_name_entry.delete(0, END)
self.var6_min_entry.delete(0, END)
self.var6_max_entry.delete(0, END)
self.var6_prec_entry.delete(0, END)
self.var6_divby_entry.delete(0, END)
self.var7_name_entry.delete(0, END)
self.var7_min_entry.delete(0, END)
self.var7_max_entry.delete(0, END)
self.var7_prec_entry.delete(0, END)
self.var7_divby_entry.delete(0, END)
self.var8_name_entry.delete(0, END)
self.var8_min_entry.delete(0, END)
self.var8_max_entry.delete(0, END)
self.var8_prec_entry.delete(0, END)
self.var8_divby_entry.delete(0, END)
self.var9_name_entry.delete(0, END)
self.var9_min_entry.delete(0, END)
self.var9_max_entry.delete(0, END)
self.var9_prec_entry.delete(0, END)
self.var9_divby_entry.delete(0, END)
self.var10_name_entry.delete(0, END)
self.var10_min_entry.delete(0, END)
self.var10_max_entry.delete(0, END)
self.var10_prec_entry.delete(0, END)
self.var10_divby_entry.delete(0, END)
self.var11_name_entry.delete(0, END)
self.var11_min_entry.delete(0, END)
self.var11_max_entry.delete(0, END)
self.var11_prec_entry.delete(0, END)
self.var11_divby_entry.delete(0, END)
self.var12_name_entry.delete(0, END)
self.var12_min_entry.delete(0, END)
self.var12_max_entry.delete(0, END)
self.var12_prec_entry.delete(0, END)
self.var12_divby_entry.delete(0, END)
self.var13_name_entry.delete(0, END)
self.var13_min_entry.delete(0, END)
self.var13_max_entry.delete(0, END)
self.var13_prec_entry.delete(0, END)
self.var13_divby_entry.delete(0, END)
self.var14_name_entry.delete(0, END)
self.var14_min_entry.delete(0, END)
self.var14_max_entry.delete(0, END)
self.var14_prec_entry.delete(0, END)
self.var14_divby_entry.delete(0, END)
self.var15_name_entry.delete(0, END)
self.var15_min_entry.delete(0, END)
self.var15_max_entry.delete(0, END)
self.var15_prec_entry.delete(0, END)
self.var15_divby_entry.delete(0, END)
self.res1_name_entry.delete(0, END)
self.res1_min_entry.delete(0, END)
self.res1_max_entry.delete(0, END)
self.res1_prec_entry.delete(0, END)
self.res1_tol_entry.delete(0, END)
self.res1_points_entry.delete(0, END)
self.res2_name_entry.delete(0, END)
self.res2_min_entry.delete(0, END)
self.res2_max_entry.delete(0, END)
self.res2_prec_entry.delete(0, END)
self.res2_tol_entry.delete(0, END)
self.res2_points_entry.delete(0, END)
self.res3_name_entry.delete(0, END)
self.res3_min_entry.delete(0, END)
self.res3_max_entry.delete(0, END)
self.res3_prec_entry.delete(0, END)
self.res3_tol_entry.delete(0, END)
self.res3_points_entry.delete(0, END)
self.res4_name_entry.delete(0, END)
self.res4_min_entry.delete(0, END)
self.res4_max_entry.delete(0, END)
self.res4_prec_entry.delete(0, END)
self.res4_tol_entry.delete(0, END)
self.res4_points_entry.delete(0, END)
self.res5_name_entry.delete(0, END)
self.res5_min_entry.delete(0, END)
self.res5_max_entry.delete(0, END)
self.res5_prec_entry.delete(0, END)
self.res5_tol_entry.delete(0, END)
self.res5_points_entry.delete(0, END)
self.res6_name_entry.delete(0, END)
self.res6_min_entry.delete(0, END)
self.res6_max_entry.delete(0, END)
self.res6_prec_entry.delete(0, END)
self.res6_tol_entry.delete(0, END)
self.res6_points_entry.delete(0, END)
self.res7_name_entry.delete(0, END)
self.res7_min_entry.delete(0, END)
self.res7_max_entry.delete(0, END)
self.res7_prec_entry.delete(0, END)
self.res7_tol_entry.delete(0, END)
self.res7_points_entry.delete(0, END)
self.res8_name_entry.delete(0, END)
self.res8_min_entry.delete(0, END)
self.res8_max_entry.delete(0, END)
self.res8_prec_entry.delete(0, END)
self.res8_tol_entry.delete(0, END)
self.res8_points_entry.delete(0, END)
self.res9_name_entry.delete(0, END)
self.res9_min_entry.delete(0, END)
self.res9_max_entry.delete(0, END)
self.res9_prec_entry.delete(0, END)
self.res9_tol_entry.delete(0, END)
self.res9_points_entry.delete(0, END)
self.res10_name_entry.delete(0, END)
self.res10_min_entry.delete(0, END)
self.res10_max_entry.delete(0, END)
self.res10_prec_entry.delete(0, END)
self.res10_tol_entry.delete(0, END)
self.res10_points_entry.delete(0, END)
class Create_Formelfrage_Questions(Formelfrage):
# INIT
# ff_question_structure
# ff_question_variable_structure
# ff_question_results_structure
def __init__(self, db_entry_to_index_dict, ids_in_entry_box, question_type, pool_img_dir, ilias_id_pool_qpl_dir, xml_read_qti_template_path, xml_qti_output_file_path, xml_qpl_output_file_path, max_id_pool_qti_xml, max_id, taxonomy_file_question_pool):
# Specifies the number of pools.
# Usually only 1 pool is created; multiple pools are only created for "split by taxonomy" creation
#self.number_of_pools = 1
self.ff_db_entry_to_index_dict = db_entry_to_index_dict
self.ff_test_entry_splitted = ids_in_entry_box.split(",") #todo these are the entered IDs that belong to the desired test questions
self.qti_file_path_output = xml_qti_output_file_path
self.formelfrage_pool_qpl_file_path_output = xml_qpl_output_file_path
self.ff_mytree = ET.parse(xml_read_qti_template_path)
self.ff_myroot = self.ff_mytree.getroot()
self.ff_question_type_test_or_pool = question_type
self.formelfrage_pool_img_file_path = pool_img_dir # Only used when creating a question pool. The folder name is created at runtime
self.all_entries_from_db_list = []
self.number_of_entrys = []
self.question_pool_id_list = []
self.question_title_list = []
self.ff_number_of_questions_generated = 1
self.ilias_id_pool_qpl_dir = ilias_id_pool_qpl_dir
self.ff_file_max_id = max_id
self.taxonomy_file_question_pool = taxonomy_file_question_pool
self.ilias_id_pool_qti_xml = max_id_pool_qti_xml
print("\n")
if self.ff_question_type_test_or_pool == "question_test":
print("FORMELFRAGE: ILIAS-TEST WIRD ERSTELLT... ID: " + str(ids_in_entry_box)) #todo die ID ist für eine Frage?
else:
print("FORMELFRAGE: ILIAS-POOL WIRD ERSTELLT... ID: " + str(ids_in_entry_box))
# Connect to the FF database
connect_ff_db = sqlite3.connect(self.database_formelfrage_path)
cursor = connect_ff_db.cursor()
# Check whether all entries should be generated (checkbox set)
if self.ff_var_create_question_pool_all_check.get() == 1 and self.ff_var_create_multiple_question_pools_from_tax_check.get() == 0:
conn = sqlite3.connect(self.database_formelfrage_path)
c = conn.cursor()
c.execute("SELECT *, oid FROM %s" % self.ff_database_table)
ff_db_records = c.fetchall()
for ff_db_record in ff_db_records:
self.all_entries_from_db_list.append(int(ff_db_record[len(ff_db_record) - 1]))
self.string_temp = ','.join(map(str, self.all_entries_from_db_list))
self.ff_test_entry_splitted = self.string_temp.split(",")
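# Sketch: the join/split round trip above merely converts the integer oids to strings;
# an equivalent one-liner would be
#   self.ff_test_entry_splitted = [str(oid) for oid in self.all_entries_from_db_list]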
# The entry with ID "1" is the template and should not be created
self.ff_test_entry_splitted.pop(0)
print(self.ff_test_entry_splitted)
#print("Number of Pools: " + str(len(self.list_of_lists)))
#self.number_of_pools = len(self.list_of_lists)
# Read all database entries with their corresponding "oid" (database ID)
# The database ID is created automatically for each new entry (sequential) and cannot be influenced
cursor.execute("SELECT *, oid FROM %s" % self.ff_database_table) #todo Wo kommt das Database_table her?
ff_db_records = cursor.fetchall()
"""
for pool_number in range(self.number_of_pools):
self.string2_temp = ','.join(map(str, self.list_of_lists[pool_number]))
self.ff_test_entry_splitted = self.string2_temp.split(",")
print("%%%%%%")
print(self.ff_test_entry_splitted)
"""
#todo the individual contents are copied from the db records (buffer) into a second buffer that already carries the matching name, in order to build a question from them
for i in range(len(self.ff_test_entry_splitted)): #todo one pass for every question that belongs to the test
for ff_db_record in ff_db_records: # todo contents of all questions, or only of the one question from the database that should go into the test?
if str(ff_db_record[len(ff_db_record) - 1]) == self.ff_test_entry_splitted[i]: #todo not sure what this comparison is supposed to distinguish?
for t in range(len(ff_db_record)):
if ff_db_record[self.ff_db_entry_to_index_dict['question_type']].lower() == self.ff_question_type_name.lower():
self.ff_question_difficulty = ff_db_record[self.ff_db_entry_to_index_dict['question_difficulty']]
self.ff_question_category = ff_db_record[self.ff_db_entry_to_index_dict['question_category']]
self.ff_question_type = ff_db_record[self.ff_db_entry_to_index_dict['question_type']]
self.ff_question_title = ff_db_record[self.ff_db_entry_to_index_dict['question_title']].replace('&', "&")
self.ff_question_description_title = ff_db_record[self.ff_db_entry_to_index_dict['question_description_title']].replace('&', "&")
self.ff_question_description_main = ff_db_record[self.ff_db_entry_to_index_dict['question_description_main']]
self.ff_res1_formula = ff_db_record[self.ff_db_entry_to_index_dict['res1_formula']]
self.ff_res2_formula = ff_db_record[self.ff_db_entry_to_index_dict['res2_formula']]
self.ff_res3_formula = ff_db_record[self.ff_db_entry_to_index_dict['res3_formula']]
self.ff_res4_formula = ff_db_record[self.ff_db_entry_to_index_dict['res4_formula']]
self.ff_res5_formula = ff_db_record[self.ff_db_entry_to_index_dict['res5_formula']]
self.ff_res6_formula = ff_db_record[self.ff_db_entry_to_index_dict['res6_formula']]
self.ff_res7_formula = ff_db_record[self.ff_db_entry_to_index_dict['res7_formula']]
self.ff_res8_formula = ff_db_record[self.ff_db_entry_to_index_dict['res8_formula']]
self.ff_res9_formula = ff_db_record[self.ff_db_entry_to_index_dict['res9_formula']]
self.ff_res10_formula = ff_db_record[self.ff_db_entry_to_index_dict['res10_formula']]
self.ff_var1_name = ff_db_record[self.ff_db_entry_to_index_dict['var1_name']]
self.ff_var1_min = ff_db_record[self.ff_db_entry_to_index_dict['var1_min']]
self.ff_var1_max = ff_db_record[self.ff_db_entry_to_index_dict['var1_max']]
self.ff_var1_prec = ff_db_record[self.ff_db_entry_to_index_dict['var1_prec']]
self.ff_var1_divby = ff_db_record[self.ff_db_entry_to_index_dict['var1_divby']]
self.ff_var1_unit = ff_db_record[self.ff_db_entry_to_index_dict['var1_unit']]
self.ff_var2_name = ff_db_record[self.ff_db_entry_to_index_dict['var2_name']]
self.ff_var2_min = ff_db_record[self.ff_db_entry_to_index_dict['var2_min']]
self.ff_var2_max = ff_db_record[self.ff_db_entry_to_index_dict['var2_max']]
self.ff_var2_prec = ff_db_record[self.ff_db_entry_to_index_dict['var2_prec']]
self.ff_var2_divby = ff_db_record[self.ff_db_entry_to_index_dict['var2_divby']]
self.ff_var2_unit = ff_db_record[self.ff_db_entry_to_index_dict['var2_unit']]
# Read the remaining variable columns (var3..var15) in a loop instead of one
# hand-written assignment per field; setattr() creates exactly the same
# ff_var<n>_<field> attributes that the rest of the class references.
for var_nr in range(3, 16):
    for var_field in ('name', 'min', 'max', 'prec', 'divby', 'unit'):
        var_key = 'var%d_%s' % (var_nr, var_field)
        setattr(self, 'ff_' + var_key, ff_db_record[self.ff_db_entry_to_index_dict[var_key]])
# Read the result columns (res1..res10) the same way. This also fixes a typo in
# the former hand-written block, which assigned "self.ff_es8_name" instead of
# "self.ff_res8_name" for result 8.
for res_nr in range(1, 11):
    for res_field in ('name', 'min', 'max', 'prec', 'tol', 'points', 'unit'):
        res_key = 'res%d_%s' % (res_nr, res_field)
        setattr(self, 'ff_' + res_key, ff_db_record[self.ff_db_entry_to_index_dict[res_key]])
self.ff_description_img_name_1 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_name_1']]
self.ff_description_img_data_1 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_data_1']]
self.ff_description_img_path_1 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_path_1']]
self.ff_description_img_name_2 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_name_2']]
self.ff_description_img_data_2 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_data_2']]
self.ff_description_img_path_2 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_path_2']]
self.ff_description_img_name_3 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_name_3']]
self.ff_description_img_data_3 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_data_3']]
self.ff_description_img_path_3 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_path_3']]
self.ff_test_time = ff_db_record[self.ff_db_entry_to_index_dict['test_time']]
self.ff_var_number = ff_db_record[self.ff_db_entry_to_index_dict['var_number']]
self.ff_res_number = ff_db_record[self.ff_db_entry_to_index_dict['res_number']]
self.ff_question_pool_tag = ff_db_record[self.ff_db_entry_to_index_dict['question_pool_tag']]
self.ff_question_author = ff_db_record[self.ff_db_entry_to_index_dict['question_author']].replace('&', '&amp;')  # escape "&" for XML
Create_Formelfrage_Questions.ff_question_structure(self, i)
def ff_question_structure(self, id_nr):  # todo: called by the function directly above
"""This function converts the SQL entries into the .xml that can afterwards be imported into ILIAS"""
# VARIABLES
self.ff_response_counter = 0  # counts how many answers are used per question; a new answer -> +1
self.ff_question_description_main = test_generator_modul_taxonomie_und_textformatierung.Textformatierung.format_description_text_in_xml(self, self.ff_var_use_latex_on_text_check.get(), self.ff_question_description_main)
# todo: all data has already been assigned above, so why is the database queried again?
# Connect to the Formelfrage database
ff_connect = sqlite3.connect(self.database_formelfrage_path)
ff_cursor = ff_connect.cursor()
# Read all entries
ff_cursor.execute("SELECT *, oid FROM %s" % self.ff_database_table)
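# "SELECT *, oid" appends SQLite's built-in rowid ("oid") as the last column of
# every record; the id comparison below reads it via ff_db_record[len(ff_db_record) - 1]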
ff_db_records = ff_cursor.fetchall()
for ff_db_record in ff_db_records:
# The questions are created here based on their IDs
if str(ff_db_record[len(ff_db_record)-1]) == self.ff_test_entry_splitted[id_nr]:
# Save the images for the description
test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images(self, self.ff_description_img_name_1, self.ff_description_img_data_1, id_nr, self.ff_question_type_test_or_pool, self.formelfrage_test_img_file_path, self.formelfrage_pool_img_file_path)
test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images(self, self.ff_description_img_name_2, self.ff_description_img_data_2, id_nr, self.ff_question_type_test_or_pool, self.formelfrage_test_img_file_path, self.formelfrage_pool_img_file_path)
test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images(self, self.ff_description_img_name_3, self.ff_description_img_data_3, id_nr, self.ff_question_type_test_or_pool, self.formelfrage_test_img_file_path, self.formelfrage_pool_img_file_path)
# Structure for question type "TEST"
if self.ff_question_type_test_or_pool == "question_test":
# Define the XML structure from the XML file. Only needs to be created once
questestinterop = ET.Element('questestinterop')
assessment = ET.SubElement(questestinterop, 'assessment')
section = ET.SubElement(assessment, 'section')
item = ET.SubElement(section, 'item')
# Structure for question type "POOL"
else:
# Define the XML structure from the XML file. Only needs to be created once
questestinterop = ET.Element('questestinterop')
item = ET.SubElement(questestinterop, 'item')
# Additional taxonomy settings
test_generator_modul_ilias_test_struktur.Additional_Funtions.set_taxonomy_for_question(self,
id_nr,
self.number_of_entrys,
item,
self.formelfrage_pool_qpl_file_path_template,
self.formelfrage_pool_qpl_file_path_output
)
# Structure for the Formelfrage variables/solutions part
# Must be created/added anew for every question
qticomment = ET.SubElement(item, 'qticomment')
duration = ET.SubElement(item, 'duration')
itemmetadata = ET.SubElement(item, 'itemmetadata')
presentation = ET.SubElement(item, 'presentation')
flow = ET.SubElement(presentation, 'flow')
question_description_material = ET.SubElement(flow, 'material')
question_description_mattext = ET.SubElement(question_description_material, 'mattext')
qtimetadata = ET.SubElement(itemmetadata, 'qtimetadata')
### ------------------------------------------------------- Fill the XML entries with values
# Question title -- "item title" in xml
item.set('title', self.ff_question_title)
# Question title description
qticomment.text = self.ff_question_description_title
# Test duration -- "duration" in xml
# if no test time is entered, 1h is preselected
duration.text = self.ff_test_time  # todo: is this the test time or the time for a single question?
if duration.text == "":
duration.text = "P0Y0M0DT1H0M0S"
# -----------------------------------------------------------------------ILIAS VERSION
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "ILIAS_VERSION"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = "5.4.10 2020-03-04"
# -----------------------------------------------------------------------QUESTIONTYPE
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "QUESTIONTYPE"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = "assFormulaQuestion"
# -----------------------------------------------------------------------AUTHOR
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "AUTHOR"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = self.ff_question_author
# -----------------------------------------------------------------------POINTS
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "points"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = str(self.ff_res1_points)
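# Note: this top-level "points" metadata field is filled from result 1 only;
# the per-result points are serialized separately in the $r1..$r10 entries below.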
# Insert the question title -- "presentation label" in xml
presentation.set('label', self.ff_question_title)
# Question text (format) -- "mattext_texttype" in xml -- specifies the format of the text
question_description_mattext.set('texttype', "text/html")
# Question text (content) -- "mattext_texttype" in xml -- contains the actual question description
# If an image is included, it is embedded into the question description
question_description_mattext.text = test_generator_modul_ilias_test_struktur.Additional_Funtions.add_picture_to_description_main(
self, self.ff_description_img_name_1, self.ff_description_img_data_1,
self.ff_description_img_name_2, self.ff_description_img_data_2,
self.ff_description_img_name_3, self.ff_description_img_data_3,
self.ff_question_description_main, question_description_mattext, question_description_material, id_nr)
# ----------------------------------------------------------------------- Variable
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v1", self.ff_var1_min, self.ff_var1_max, self.ff_var1_prec, self.ff_var1_divby, self.ff_var1_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v2", self.ff_var2_min, self.ff_var2_max, self.ff_var2_prec, self.ff_var2_divby, self.ff_var2_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v3", self.ff_var3_min, self.ff_var3_max, self.ff_var3_prec, self.ff_var3_divby, self.ff_var3_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v4", self.ff_var4_min, self.ff_var4_max, self.ff_var4_prec, self.ff_var4_divby, self.ff_var4_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v5", self.ff_var5_min, self.ff_var5_max, self.ff_var5_prec, self.ff_var5_divby, self.ff_var5_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v6", self.ff_var6_min, self.ff_var6_max, self.ff_var6_prec, self.ff_var6_divby, self.ff_var6_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v7", self.ff_var7_min, self.ff_var7_max, self.ff_var7_prec, self.ff_var7_divby, self.ff_var7_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v8", self.ff_var8_min, self.ff_var8_max, self.ff_var8_prec, self.ff_var8_divby, self.ff_var8_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v9", self.ff_var9_min, self.ff_var9_max, self.ff_var9_prec, self.ff_var9_divby, self.ff_var9_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v10", self.ff_var10_min, self.ff_var10_max, self.ff_var10_prec, self.ff_var10_divby, self.ff_var10_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v11", self.ff_var11_min, self.ff_var11_max, self.ff_var11_prec, self.ff_var11_divby, self.ff_var11_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v12", self.ff_var12_min, self.ff_var12_max, self.ff_var12_prec, self.ff_var12_divby, self.ff_var12_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v13", self.ff_var13_min, self.ff_var13_max, self.ff_var13_prec, self.ff_var13_divby, self.ff_var13_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v14", self.ff_var14_min, self.ff_var14_max, self.ff_var14_prec, self.ff_var14_divby, self.ff_var14_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v15", self.ff_var15_min, self.ff_var15_max, self.ff_var15_prec, self.ff_var15_divby, self.ff_var15_unit)
# ----------------------------------------------------------------------- Solution
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r1", self.ff_res1_formula, self.ff_res1_min, self.ff_res1_max, self.ff_res1_prec, self.ff_res1_tol, self.ff_res1_points, self.ff_res1_unit)
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r2", self.ff_res2_formula, self.ff_res2_min, self.ff_res2_max, self.ff_res2_prec, self.ff_res2_tol, self.ff_res2_points, self.ff_res2_unit)
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r3", self.ff_res3_formula, self.ff_res3_min, self.ff_res3_max, self.ff_res3_prec, self.ff_res3_tol, self.ff_res3_points, self.ff_res3_unit)
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r4", self.ff_res4_formula, self.ff_res4_min, self.ff_res4_max, self.ff_res4_prec, self.ff_res4_tol, self.ff_res4_points, self.ff_res4_unit)
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r5", self.ff_res5_formula, self.ff_res5_min, self.ff_res5_max, self.ff_res5_prec, self.ff_res5_tol, self.ff_res5_points, self.ff_res5_unit)
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r6", self.ff_res6_formula, self.ff_res6_min, self.ff_res6_max, self.ff_res6_prec, self.ff_res6_tol, self.ff_res6_points, self.ff_res6_unit)
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r7", self.ff_res7_formula, self.ff_res7_min, self.ff_res7_max, self.ff_res7_prec, self.ff_res7_tol, self.ff_res7_points, self.ff_res7_unit)
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r8", self.ff_res8_formula, self.ff_res8_min, self.ff_res8_max, self.ff_res8_prec, self.ff_res8_tol, self.ff_res8_points, self.ff_res8_unit)
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r9", self.ff_res9_formula, self.ff_res9_min, self.ff_res9_max, self.ff_res9_prec, self.ff_res9_tol, self.ff_res9_points, self.ff_res9_unit)
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r10", self.ff_res10_formula, self.ff_res10_min, self.ff_res10_max, self.ff_res10_prec, self.ff_res10_tol, self.ff_res10_points, self.ff_res10_unit)
# -----------------------------------------------------------------------ADDITIONAL_CONT_EDIT_MODE
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "additional_cont_edit_mode"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = "default"
# -----------------------------------------------------------------------EXTERNAL_ID
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "externalId"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = "5ea15be69c1e96.43933468"
# If this is an ILIAS test, the XML contains a structure with several "branches"
# The last "branch" --> "len(self.ff_myroot[0]) - 1" (the last node) contains the actual questions
if self.ff_question_type_test_or_pool == "question_test":
self.ff_myroot[0][len(self.ff_myroot[0]) - 1].append(item)
# If this is an ILIAS pool, the XML contains no such structure
# The question can simply be appended
else:
self.ff_myroot.append(item)
self.ff_mytree.write(self.qti_file_path_output)
print(str(self.ff_number_of_questions_generated) + ".) Formelfrage Frage erstellt! ---> Titel: " + str(self.ff_question_title))
self.ff_number_of_questions_generated += 1
self.ff_collection_of_question_titles.append(self.ff_question_title)
ff_connect.commit()
ff_connect.close()
if self.ff_question_type_test_or_pool == "question_pool":
###### Adjust the "qpl" file: update the file name
self.qpl_file = os.path.normpath(os.path.join(self.formelfrage_files_path,"ff_ilias_pool_abgabe", self.ilias_id_pool_qpl_dir, self.ilias_id_pool_qti_xml))
self.mytree = ET.parse(self.qpl_file)
self.myroot = self.mytree.getroot()
for ident_id in self.myroot.iter('Identifier'):
ident_id.set('Entry', "il_0_qpl_" + str(self.ff_file_max_id+1))
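# Set the pool identifier entry to the next free question id ("il_0_qpl_<max_id + 1>")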
self.mytree.write(self.qpl_file)
def ff_question_variables_structure(self, xml_qtimetadata, ff_var_name, ff_var_min, ff_var_max, ff_var_prec, ff_var_divby, ff_var_unit):
# <------------ INIT ----------->
self.ff_var_name = ff_var_name
self.ff_var_min = str(ff_var_min)
self.ff_var_max = str(ff_var_max)
self.ff_var_prec = str(ff_var_prec)
self.ff_var_divby = str(ff_var_divby)
self.ff_var_divby_length = len(str(self.ff_var_divby))
self.ff_var_unit = ff_var_unit
self.ff_var_unit_length = len(str(self.ff_var_unit))
# <------------ FORMELFRAGE VARIABLE STRUCTURE (in XML) ----------->
qtimetadatafield = ET.SubElement(xml_qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = ff_var_name
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
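# The fieldentry payload below is a PHP-style serialized array (the format
# ILIAS apparently expects for this metadata): "a:6" = array of 6 entries,
# "s:<n>" = string of length n, "i:"/"d:" = integer/decimal value.
# Illustrative example with made-up values:
#   a:6:{s:9:"precision";i:2;s:12:"intprecision";s:1:"1";s:8:"rangemin";d:1;
#        s:8:"rangemax";d:100;s:4:"unit";s:0:"";s:9:"unitvalue";s:0:"";}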
# With units:
if self.ff_var_unit != "":
fieldentry.text = "a:6:{" \
"s:9:\"precision\";i:" + self.ff_var_prec + ";" \
"s:12:\"intprecision\";s:" + str(self.ff_var_divby_length) + ":\"" + self.ff_var_divby + "\";" \
"s:8:\"rangemin\";d:" + self.ff_var_min + ";" \
"s:8:\"rangemax\";d:" + self.ff_var_max + ";" \
"s:4:\"unit\";s:" + str(self.ff_var_unit_length) + ":\"" + self.ff_var_unit + "\";" \
"s:9:\"unitvalue\";s:" + str(len(Formelfrage.unit_table(self, self.ff_var_unit))) + ":\"" + Formelfrage.unit_table(self, self.ff_var_unit) + "\";" \
"}"
# Without units:
else:
fieldentry.text = "a:6:{" \
"s:9:\"precision\";i:" + self.ff_var_prec + ";" \
"s:12:\"intprecision\";s:" + str(self.ff_var_divby_length) + ":\"" + self.ff_var_divby + "\";" \
"s:8:\"rangemin\";d:" + self.ff_var_min + ";" \
"s:8:\"rangemax\";d:" + self.ff_var_max + ";" \
"s:4:\"unit\";s:0:\"\";" \
"s:9:\"unitvalue\";s:0:\"\";" \
"}"
def ff_question_results_structure(self, xml_qtimetadata, ff_res_name, ff_res_formula, ff_res_min, ff_res_max, ff_res_prec, ff_res_tol, ff_res_points, ff_res_unit):
def replace_words_in_formula(formula):
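"""Normalize a formula string for ILIAS: map upper-case (and German) function
names to the lower-case names ILIAS expects, "$V"/"$R" to "$v"/"$r", and drop
"=". Matching is whole-token based (str.split()), so a compound token such as
"SIN(x)" is left unchanged."""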
self.replace_words_dict = {
"$V": "$v",
"$R": "$r",
"=": " ",
"SIN": "sin",
"SINH": "sinh",
"ARCSIN": "arcsin",
"ASIN": "asin",
"ARCSINH": "arcsinh",
"ASINH": "asinh",
"COS": "cos",
"COSH": "cosh",
"ARCCOS": "arccos",
"ACOS": "acos",
"ARCCOSH": "arccosh",
"ACOSH": "acosh",
"TAN": "tan",
"TANH": "tanh",
"ARCTAN": "arctan",
"ATAN": "atan",
"ARCTANH": "arctanh",
"ATANH": "atanh",
"SQRT": "sqrt",
"Wurzel": "sqrt",
"wurzel": "sqrt",
"ABS": "abs",
"LN": "ln",
"LOG": "log"
}
formula = ' '.join([self.replace_words_dict.get(i,i) for i in formula.split()])
return formula
# <------------ INIT ----------->
self.ff_res_name = ff_res_name
self.ff_res_formula = ff_res_formula
self.ff_res_formula_length = len(str(self.ff_res_formula))
self.ff_res_min = str(ff_res_min)
self.ff_res_min_length = len(str(self.ff_res_min))
self.ff_res_max = str(ff_res_max)
self.ff_res_max_length = len(str(self.ff_res_max))
self.ff_res_prec = str(ff_res_prec)
self.ff_res_tol = str(ff_res_tol)
self.ff_res_tol_length = len(str(self.ff_res_tol))
self.ff_res_points = str(ff_res_points)
self.ff_res_unit = ff_res_unit
self.ff_res_unit_length = len(str(self.ff_res_unit))
# ILIAS cannot handle "$Vx" instead of "$vx" or "$Rx" instead of "$rx"
# (variables need a lower-case, not an upper-case, "v")
# No "=" may be used in the result equation! No error message appears, but the
# results of the ILIAS calculation are then always set to "0"
self.ff_res_formula = replace_words_in_formula(self.ff_res_formula)
# Recompute the length after normalization ("Wurzel" -> "sqrt" etc. changes the
# string length, and the serialized s:<len> prefix must match the formula exactly)
self.ff_res_formula_length = len(str(self.ff_res_formula))
# <------------ FORMELFRAGE RESULT STRUCTURE (in XML) ----------->
# Here the structure of the result part (e.g. $r1) is written to XML
# If the result part is used with units, the corresponding data must be entered in "resultunits"
# s for string length: "9" -> precision = "9" characters
# rangemin: "s" for read string-like type --> "10*1000"
qtimetadatafield = ET.SubElement(xml_qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = self.ff_res_name
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
# With units:
if self.ff_res_unit != "":
# The integer length fields must be wrapped in str() here (as in the branch
# without units below); otherwise the string concatenation raises a TypeError.
fieldentry.text = "a:10:{" \
"s:9:\"precision\";i:" + self.ff_res_prec + ";" \
"s:9:\"tolerance\";s:" + str(self.ff_res_tol_length) + ":\"" + self.ff_res_tol + "\";" \
"s:8:\"rangemin\";s:" + str(self.ff_res_min_length) + ":\"" + self.ff_res_min + "\";" \
"s:8:\"rangemax\";s:" + str(self.ff_res_max_length) + ":\"" + self.ff_res_max + "\";" \
"s:6:\"points\";s:1:\"" + self.ff_res_points + "\";" \
"s:7:\"formula\";s:" + str(self.ff_res_formula_length) + ":\"" + self.ff_res_formula + "\";" \
"s:6:\"rating\";s:0:\"\";" \
"s:4:\"unit\";s:" + str(self.ff_res_unit_length) + ":\"" + self.ff_res_unit + "\";" \
"s:9:\"unitvalue\";s:" + str(len(Formelfrage.unit_table(self, self.ff_res_unit))) + ":\"" + Formelfrage.unit_table(self, self.ff_res_unit) + "\";" \
"s:11:\"resultunits\";a:27:{i:0;a:2:{s:4:\"unit\";s:1:\"H\";s:9:\"unitvalue\";s:3:\"125\";}" \
"i:1;a:2:{s:4:\"unit\";s:2:\"mH\";s:9:\"unitvalue\";s:3:\"126\";}" \
"i:2;a:2:{s:4:\"unit\";s:3:\"µH\";s:9:\"unitvalue\";s:3:\"127\";}" \
"i:3;a:2:{s:4:\"unit\";s:2:\"nH\";s:9:\"unitvalue\";s:3:\"128\";}" \
"i:4;a:2:{s:4:\"unit\";s:2:\"kH\";s:9:\"unitvalue\";s:3:\"129\";}" \
"i:5;a:2:{s:4:\"unit\";s:2:\"pH\";s:9:\"unitvalue\";s:3:\"130\";}" \
"i:6;a:2:{s:4:\"unit\";s:1:\"F\";s:9:\"unitvalue\";s:3:\"131\";}" \
"i:7;a:2:{s:4:\"unit\";s:2:\"mF\";s:9:\"unitvalue\";s:3:\"132\";}" \
"i:8;a:2:{s:4:\"unit\";s:3:\"µF\";s:9:\"unitvalue\";s:3:\"133\";}" \
"i:9;a:2:{s:4:\"unit\";s:2:\"nF\";s:9:\"unitvalue\";s:3:\"134\";}" \
"i:10;a:2:{s:4:\"unit\";s:2:\"pF\";s:9:\"unitvalue\";s:3:\"135\";}" \
"i:11;a:2:{s:4:\"unit\";s:1:\"W\";s:9:\"unitvalue\";s:3:\"136\";}" \
"i:12;a:2:{s:4:\"unit\";s:2:\"kW\";s:9:\"unitvalue\";s:3:\"137\";}" \
"i:13;a:2:{s:4:\"unit\";s:2:\"MW\";s:9:\"unitvalue\";s:3:\"138\";}" \
"i:14;a:2:{s:4:\"unit\";s:1:\"V\";s:9:\"unitvalue\";s:3:\"139\";}" \
"i:15;a:2:{s:4:\"unit\";s:2:\"kV\";s:9:\"unitvalue\";s:3:\"140\";}" \
"i:16;a:2:{s:4:\"unit\";s:2:\"mV\";s:9:\"unitvalue\";s:3:\"141\";}" \
"i:17;a:2:{s:4:\"unit\";s:3:\"µV\";s:9:\"unitvalue\";s:3:\"142\";}" \
"i:18;a:2:{s:4:\"unit\";s:2:\"MV\";s:9:\"unitvalue\";s:3:\"143\";}" \
"i:19;a:2:{s:4:\"unit\";s:1:\"A\";s:9:\"unitvalue\";s:3:\"144\";}" \
"i:20;a:2:{s:4:\"unit\";s:2:\"mA\";s:9:\"unitvalue\";s:3:\"145\";}" \
"i:21;a:2:{s:4:\"unit\";s:3:\"µA\";s:9:\"unitvalue\";s:3:\"146\";}" \
"i:22;a:2:{s:4:\"unit\";s:2:\"kA\";s:9:\"unitvalue\";s:3:\"147\";}" \
"i:23;a:2:{s:4:\"unit\";s:3:\"Ohm\";s:9:\"unitvalue\";s:3:\"148\";}" \
"i:24;a:2:{s:4:\"unit\";s:2:\"mW\";s:9:\"unitvalue\";s:3:\"149\";}" \
"i:25;a:2:{s:4:\"unit\";s:4:\"kOhm\";s:9:\"unitvalue\";s:3:\"150\";}" \
"i:26;a:2:{s:4:\"unit\";s:4:\"mOhm\";s:9:\"unitvalue\";s:3:\"151\";}}" \
"}"
# Without units:
else:
fieldentry.text = "a:10:{" \
"s:9:\"precision\";i:" + self.ff_res_prec + ";" \
"s:9:\"tolerance\";s:" + str(self.ff_res_tol_length) + ":\"" + self.ff_res_tol + "\";" \
"s:8:\"rangemin\";s:" + str(self.ff_res_min_length) + ":\"" + self.ff_res_min + "\";" \
"s:8:\"rangemax\";s:" + str(self.ff_res_max_length) + ":\"" + self.ff_res_max + "\";" \
"s:6:\"points\";s:1:\"" + self.ff_res_points + "\";" \
"s:7:\"formula\";s:" + str(self.ff_res_formula_length) + ":\"" + self.ff_res_formula + "\";" \
"s:6:\"rating\";s:0:\"\";" \
"s:4:\"unit\";s:0:\"\";" \
"s:9:\"unitvalue\";s:0:\"\";" \
"s:11:\"resultunits\";a:0:{}" \
"}"
"""
class GUI_settings_window(Formelfrage):
def __init__(self):
# New Window must be "Toplevel" not "Tk()" in order to get Radiobuttons to work properly
self.test_settings_window = Toplevel()
self.test_settings_window.title("Test-Settings")
# Create a ScrolledFrame widget
self.sf_test_settings = ScrolledFrame(self.test_settings_window, width=300,
height=300)
self.sf_test_settings.pack(expand=1, fill="both")
# Bind the arrow keys and scroll wheel
### This call has no effect, but it raises (negligible) errors
# self.sf_test_settings.bind_arrow_keys(app)
# self.sf_test_settings.bind_scroll_wheel(app)
# Create a frame within the ScrolledFrame
self.test_settings = self.sf_test_settings.display_widget(Frame)
self.frame1 = LabelFrame(self.test_settings, text="Test Settings Frame1...", padx=5, pady=5)
self.frame1.grid(row=0, column=0, padx=20, pady=10, sticky=NW)
self.frame2 = LabelFrame(self.test_settings, text="Test Settings Frame2...", padx=5, pady=5)
self.frame2.grid(row=0, column=1, padx=20, pady=10, sticky=NW)
self.frame3 = LabelFrame(self.test_settings, text="Test Settings Frame3...", padx=5, pady=5)
self.frame3.grid(row=0, column=2, padx=20, pady=10, sticky=NW)
self.res12_min_listbox_label = Label(self.frame1, text="EINSTELLUNGEN DES TESTS",
font=('Helvetica', 10, 'bold'))
self.res12_min_listbox_label.grid(row=0, column=0, sticky=W, padx=10, pady=(20, 0))
self.res90_min_listbox_label = Label(self.frame1, text="Test-Titel")
self.res90_min_listbox_label.grid(row=1, column=0, sticky=W, padx=10)
self.res91_max_listbox_label = Label(self.frame1, text="Beschreibung")
self.res91_max_listbox_label.grid(row=2, column=0, sticky=W, padx=10)
self.res1_max_listbox_label = Label(self.frame1, text="Auswahl der Testfragen")
self.res1_max_listbox_label.grid(row=4, column=0, sticky=W, padx=10)
self.res1_prec_listbox_label = Label(self.frame1, text="Datenschutz")
self.res1_prec_listbox_label.grid(row=7, column=0, sticky=W, padx=10)
self.res1_tol_listbox_label = Label(self.frame1, text="VERFÜGBARKEIT", font=('Helvetica', 10, 'bold'))
self.res1_tol_listbox_label.grid(row=9, column=0, sticky=W, padx=10, pady=(20, 0))
self.res1_points_listbox_label = Label(self.frame1, text="Online --- not working")
self.res1_points_listbox_label.grid(row=10, column=0, sticky=W, padx=10)
self.res13_points_listbox_label = Label(self.frame1,
text="Zeitlich begrenzte Verfügbarkeit --- not working")
self.res13_points_listbox_label.grid(row=11, column=0, sticky=W, padx=10)
self.res22_tol_listbox_label = Label(self.frame1, text="INFORMATIONEN ZUM EINSTIEG",
font=('Helvetica', 10, 'bold'))
self.res22_tol_listbox_label.grid(row=14, column=0, sticky=W, padx=10, pady=(20, 0))
self.res23_points_listbox_label = Label(self.frame1, text="Einleitung")
self.res23_points_listbox_label.grid(row=15, column=0, sticky=W, padx=10)
self.res24_points_listbox_label = Label(self.frame1, text="Testeigenschaften anzeigen")
self.res24_points_listbox_label.grid(row=16, column=0, sticky=W, padx=10)
self.res31_tol_listbox_label = Label(self.frame1, text="DURCHFÜHRUNG: ZUGANG", font=('Helvetica', 10, 'bold'))
self.res31_tol_listbox_label.grid(row=17, column=0, sticky=W, padx=10, pady=(20, 0))
self.test_time_year_label = Label(self.frame1, text="Jahr")
self.test_time_year_label.grid(row=17, column=1, sticky=W)
self.test_time_month_label = Label(self.frame1, text="Mon.")
self.test_time_month_label.grid(row=17, column=1, sticky=W, padx=35)
self.test_time_day_label = Label(self.frame1, text="Tag")
self.test_time_day_label.grid(row=17, column=1, sticky=W, padx=70)
self.test_time_hour_label = Label(self.frame1, text="Std.")
self.test_time_hour_label.grid(row=17, column=1, sticky=W, padx=105)
self.test_time_minute_label = Label(self.frame1, text="Min.")
self.test_time_minute_label.grid(row=17, column=1, sticky=W, padx=140)
self.res32_points_listbox_label = Label(self.frame1, text="Test-Start")
self.res32_points_listbox_label.grid(row=18, column=0, sticky=W, padx=10)
self.res33_points_listbox_label = Label(self.frame1, text="Test-Ende")
self.res33_points_listbox_label.grid(row=19, column=0, sticky=W, padx=10)
self.res34_tol_listbox_label = Label(self.frame1, text="Test-Passwort")
self.res34_tol_listbox_label.grid(row=20, column=0, sticky=W, padx=10)
self.res35_points_listbox_label = Label(self.frame1, text="Nur ausgewählte Teilnehmer")
self.res35_points_listbox_label.grid(row=21, column=0, sticky=W, padx=10)
self.res36_points_listbox_label = Label(self.frame1, text="Anzahl gleichzeitiger Teilnehmer begrenzen")
self.res36_points_listbox_label.grid(row=22, column=0, sticky=W, padx=10)
self.res37_points_listbox_label = Label(self.frame1, text="Inaktivitätszeit der Teilnehmer (in Sek.)")
self.res37_points_listbox_label.grid(row=23, column=0, sticky=W, padx=30)
self.res41_tol_listbox_label = Label(self.frame1, text="DURCHFÜHRUNG: STEUERUNG TESTDURCHLAUF",
font=('Helvetica', 10, 'bold'))
self.res41_tol_listbox_label.grid(row=24, column=0, sticky=W, padx=10, pady=(20, 0))
self.res42_points_listbox_label = Label(self.frame1, text="Anzahl von Testdurchläufen begrenzen")
self.res42_points_listbox_label.grid(row=25, column=0, sticky=W, padx=10)
self.res43_points_listbox_label = Label(self.frame1, text="Wartezeit zwischen Durchläufen erzwingen")
self.res43_points_listbox_label.grid(row=26, column=0, sticky=W, padx=10)
self.res44_tol_listbox_label = Label(self.frame1, text="Bearbeitungsdauer begrenzen")
self.res44_tol_listbox_label.grid(row=27, column=0, sticky=W, padx=10)
self.res44_tol_listbox_label = Label(self.frame1, text="Bearbeitungsdauer (in Min).")
self.res44_tol_listbox_label.grid(row=28, column=0, sticky=W, padx=30)
self.res44_tol_listbox_label = Label(self.frame1, text="Max. Bearbeitungsdauer für jeden Testlauf zurücksetzen")
self.res44_tol_listbox_label.grid(row=29, column=0, sticky=W, padx=30)
self.res45_points_listbox_label = Label(self.frame1, text="Prüfungsansicht")
self.res45_points_listbox_label.grid(row=30, column=0, sticky=W, padx=10)
self.res45_1_points_listbox_label = Label(self.frame1, text="Titel des Tests")
self.res45_1_points_listbox_label.grid(row=31, column=0, sticky=W, padx=30)
self.res45_2_points_listbox_label = Label(self.frame1, text="Name des Teilnehmers")
self.res45_2_points_listbox_label.grid(row=32, column=0, sticky=W, padx=30)
self.res46_points_listbox_label = Label(self.frame1, text="ILIAS-Prüfungsnummer anzeigen")
self.res46_points_listbox_label.grid(row=33, column=0, sticky=W, padx=10)
self.res51_tol_listbox_label = Label(self.frame2, text="DURCHFÜHRUNG: VERHALTEN DER FRAGE",
font=('Helvetica', 10, 'bold'))
self.res51_tol_listbox_label.grid(row=0, column=2, sticky=W, padx=10, pady=(20, 0))
self.res52_points_listbox_label = Label(self.frame2, text="Anzeige der Fragentitel")
self.res52_points_listbox_label.grid(row=1, column=2, sticky=W, padx=10)
self.res53_points_listbox_label = Label(self.frame2, text="Automatisches speichern")
self.res53_points_listbox_label.grid(row=4, column=2, sticky=W, padx=10)
self.res54_tol_listbox_label = Label(self.frame2, text="Fragen mischen")
self.res54_tol_listbox_label.grid(row=5, column=2, sticky=W, padx=10)
self.res55_points_listbox_label = Label(self.frame2, text="Lösungshinweise")
self.res55_points_listbox_label.grid(row=6, column=2, sticky=W, padx=10)
self.res56_points_listbox_label = Label(self.frame2, text="Direkte Rückmeldung --- not working")
self.res56_points_listbox_label.grid(row=7, column=2, sticky=W, padx=10)
self.res57_tol_listbox_label = Label(self.frame2, text="Teilnehmerantworten")
self.res57_tol_listbox_label.grid(row=8, column=2, sticky=W, padx=10)
self.res58_points_listbox_label = Label(self.frame2, text="Verpflichtende Fragen")
self.res58_points_listbox_label.grid(row=12, column=2, sticky=W, padx=10)
self.res61_tol_listbox_label = Label(self.frame2, text="DURCHFÜHRUNG: FUNKTIONEN FÜR TEILNEHMER",
font=('Helvetica', 10, 'bold'))
self.res61_tol_listbox_label.grid(row=13, column=2, sticky=W, padx=10, pady=(20, 0))
self.res62_points_listbox_label = Label(self.frame2, text="Verwendung vorheriger Lösungen")
self.res62_points_listbox_label.grid(row=14, column=2, sticky=W, padx=10)
self.res63_points_listbox_label = Label(self.frame2, text="\"Test unterbrechen\" anzeigen")
self.res63_points_listbox_label.grid(row=15, column=2, sticky=W, padx=10)
self.res64_tol_listbox_label = Label(self.frame2, text="Nicht beantwortete Fragen")
self.res64_tol_listbox_label.grid(row=16, column=2, sticky=W, padx=10)
self.res65_points_listbox_label = Label(self.frame2, text="Fragenliste und Bearbeitungsstand anzeigen")
self.res65_points_listbox_label.grid(row=18, column=2, sticky=W, padx=10)
self.res66_points_listbox_label = Label(self.frame2, text="Fragen markieren")
self.res66_points_listbox_label.grid(row=19, column=2, sticky=W, padx=10)
self.res71_tol_listbox_label = Label(self.frame2, text="TEST ABSCHLIESSEN", font=('Helvetica', 10, 'bold'))
self.res71_tol_listbox_label.grid(row=20, column=2, sticky=W, padx=10, pady=(20, 0))
self.res72_points_listbox_label = Label(self.frame2, text="Übersicht gegebener Antworten")
self.res72_points_listbox_label.grid(row=21, column=2, sticky=W, padx=10)
self.res73_points_listbox_label = Label(self.frame2, text="Abschließende Bemerkung")
self.res73_points_listbox_label.grid(row=22, column=2, sticky=W, padx=10)
self.res74_tol_listbox_label = Label(self.frame2, text="Weiterleitung")
self.res74_tol_listbox_label.grid(row=23, column=2, sticky=W, padx=10)
self.res75_points_listbox_label = Label(self.frame2, text="Benachrichtigung")
self.res75_points_listbox_label.grid(row=24, column=2, sticky=W, padx=10)
# --------------------------- DEFINE CHECKBOXES WITH ENTRYS ---------------------------------------
# --------------------------- CHECKBOXES ---------------------------------------
self.var_online = IntVar()
self.check_online = Checkbutton(self.frame1, text="", variable=self.var_online, onvalue=1, offvalue=0)
self.check_online.deselect()
self.check_online.grid(row=10, column=1, sticky=W)
self.var_time_limited = IntVar()
self.time_limited_start_label = Label(self.frame1, text="Start")
self.time_limited_start_day_label = Label(self.frame1, text="Tag")
self.time_limited_start_day_entry = Entry(self.frame1, width=3)
self.time_limited_start_month_label = Label(self.frame1, text="Mo")
self.time_limited_start_month_entry = Entry(self.frame1, width=3)
self.time_limited_start_year_label = Label(self.frame1, text="Jahr")
self.time_limited_start_year_entry = Entry(self.frame1, width=4)
self.time_limited_start_hour_label = Label(self.frame1, text="Std")
self.time_limited_start_hour_entry = Entry(self.frame1, width=3)
self.time_limited_start_minute_label = Label(self.frame1, text="Min")
self.time_limited_start_minute_entry = Entry(self.frame1, width=3)
self.time_limited_end_label = Label(self.frame1, text="Ende")
self.time_limited_end_day_label = Label(self.frame1, text="Tag")
self.time_limited_end_day_entry = Entry(self.frame1, width=3)
self.time_limited_end_month_label = Label(self.frame1, text="Mo")
self.time_limited_end_month_entry = Entry(self.frame1, width=3)
self.time_limited_end_year_label = Label(self.frame1, text="Jahr")
self.time_limited_end_year_entry = Entry(self.frame1, width=4)
self.time_limited_end_hour_label = Label(self.frame1, text="Std")
self.time_limited_end_hour_entry = Entry(self.frame1, width=3)
self.time_limited_end_minute_label = Label(self.frame1, text="Min")
self.time_limited_end_minute_entry = Entry(self.frame1, width=3)
# self.entry.grid(row=11, column=1, sticky=W, padx=20)
self.check_time_limited = Checkbutton(self.frame1, text="", variable=self.var_time_limited, onvalue=1,
offvalue=0,
command=lambda
v=self.var_time_limited: GUI_settings_window.show_entry_time_limited_start(
self, v))
self.check_time_limited.deselect()
self.check_time_limited.grid(row=11, column=1, sticky=W)
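# The "v=self.var_..." default argument in the Checkbutton lambdas binds the
# Tkinter variable at lambda definition time -- the standard Python idiom for
# passing per-widget state into callbacks.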
self.var_introduction = IntVar()
self.check_introduction = Checkbutton(self.frame1, text="", variable=self.var_introduction, onvalue=1,
offvalue=0,
command=lambda
v=self.var_introduction: GUI_settings_window.show_introduction_textfield(
self, v))
self.check_introduction.deselect()
self.check_introduction.grid(row=15, column=1, sticky=W)
self.var_test_prop = IntVar()
self.check_test_prop = Checkbutton(self.frame1, text="", variable=self.var_test_prop, onvalue=1, offvalue=0)
self.check_test_prop.deselect()
self.check_test_prop.grid(row=16, column=1, sticky=W)
# self.var_test_password = IntVar()
# self.check_test_password = Checkbutton(self.frame1, text="", variable=self.var_test_password, onvalue=1, offvalue=0)
# self.check_test_password.deselect()
# self.check_test_password.grid(row=20, column=1, sticky=W)
self.var_specific_users = IntVar()
self.check_specific_users = Checkbutton(self.frame1, text="", variable=self.var_specific_users, onvalue=1,
offvalue=0)
self.check_specific_users.deselect()
self.check_specific_users.grid(row=21, column=1, sticky=W)
# self.var_fixed_users = IntVar()
# self.check_fixed_users = Checkbutton(self.frame1, text="", variable=self.var_fixed_users, onvalue=1, offvalue=0)
# self.check_fixed_users.deselect()
# self.check_fixed_users.grid(row=22, column=1, sticky=W)
# self.var_limit_test_runs = IntVar()
# self.check_limit_test_runs = Checkbutton(self.frame1, text="", variable=self.var_limit_test_runs, onvalue=1, offvalue=0)
# self.check_limit_test_runs.deselect()
# self.check_limit_test_runs.grid(row=22, column=1, sticky=W)
# self.var_time_betw_test_runs = IntVar()
# self.check_time_betw_test_runs = Checkbutton(self.frame1, text="", variable=self.var_time_betw_test_runs, onvalue=1, offvalue=0)
# self.check_time_betw_test_runs.deselect()
# self.check_time_betw_test_runs.grid(row=25, column=1, sticky=W)
self.var_processing_time = IntVar()
self.check_processing_time = Checkbutton(self.frame1, text="", variable=self.var_processing_time, onvalue=1,
offvalue=0)
self.check_processing_time.deselect()
self.check_processing_time.grid(row=27, column=1, sticky=W)
self.var_processing_time_reset = IntVar()
self.check_processing_time_reset = Checkbutton(self.frame1, text="", variable=self.var_processing_time_reset,
onvalue=1, offvalue=0)
self.check_processing_time_reset.deselect()
self.check_processing_time_reset.grid(row=29, column=1, sticky=W)
self.var_examview = IntVar()
self.check_examview = Checkbutton(self.frame1, text="", variable=self.var_examview, onvalue=1, offvalue=0)
self.check_examview.deselect()
self.check_examview.grid(row=30, column=1, sticky=W)
self.var_examview_test_title = IntVar()
self.check_examview_test_title = Checkbutton(self.frame1, text="", variable=self.var_examview_test_title,
onvalue=1, offvalue=0)
self.check_examview_test_title.deselect()
self.check_examview_test_title.grid(row=31, column=1, sticky=W)
self.var_examview_user_name = IntVar()
self.check_examview_user_name = Checkbutton(self.frame1, text="", variable=self.var_examview_user_name,
onvalue=1, offvalue=0)
self.check_examview_user_name.deselect()
self.check_examview_user_name.grid(row=32, column=1, sticky=W)
self.var_show_ilias_nr = IntVar()
self.check_show_ilias_nr = Checkbutton(self.frame1, text="", variable=self.var_show_ilias_nr, onvalue=1,
offvalue=0)
self.check_show_ilias_nr.deselect()
self.check_show_ilias_nr.grid(row=33, column=1, sticky=W)
self.var_autosave = IntVar()
self.check_autosave = Checkbutton(self.frame2, text="", variable=self.var_autosave, onvalue=1, offvalue=0,
command=lambda v=self.var_autosave: GUI_settings_window.enable_autosave(self,
v))
self.check_autosave_interval_label = Label(self.frame2, text="Speicherintervall (in Sek.):")
self.check_autosave_interval_entry = Entry(self.frame2, width=10)
self.check_autosave.deselect()
self.check_autosave.grid(row=4, column=3, sticky=W)
self.var_mix_questions = IntVar()
self.check_mix_questions = Checkbutton(self.frame2, text="", variable=self.var_mix_questions, onvalue=1,
offvalue=0)
self.check_mix_questions.deselect()
self.check_mix_questions.grid(row=5, column=3, sticky=W)
self.var_show_solution_notes = IntVar()
self.check_show_solution_notes = Checkbutton(self.frame2, text="", variable=self.var_show_solution_notes,
onvalue=1, offvalue=0)
self.check_show_solution_notes.deselect()
self.check_show_solution_notes.grid(row=6, column=3, sticky=W)
self.var_direct_response = IntVar()
self.check_direct_response = Checkbutton(self.frame2, text="", variable=self.var_direct_response, onvalue=1,
offvalue=0)
self.check_direct_response.deselect()
self.check_direct_response.grid(row=7, column=3, sticky=W)
self.var_mandatory_questions = IntVar()
self.check_mandatory_questions = Checkbutton(self.frame2, text="", variable=self.var_mandatory_questions,
onvalue=1, offvalue=0)
self.check_mandatory_questions.deselect()
self.check_mandatory_questions.grid(row=12, column=3, sticky=W)
self.var_use_previous_solution = IntVar()
self.check_use_previous_solution = Checkbutton(self.frame2, text="", variable=self.var_use_previous_solution,
onvalue=1, offvalue=0)
self.check_use_previous_solution.deselect()
self.check_use_previous_solution.grid(row=14, column=3, sticky=W)
self.var_show_test_cancel = IntVar()
self.check_show_test_cancel = Checkbutton(self.frame2, text="", variable=self.var_show_test_cancel, onvalue=1,
offvalue=0)
self.check_show_test_cancel.deselect()
self.check_show_test_cancel.grid(row=15, column=3, sticky=W)
self.var_show_question_list_process_status = IntVar()
self.check_show_question_list_process_status = Checkbutton(self.frame2, text="",
variable=self.var_show_question_list_process_status,
onvalue=1, offvalue=0)
self.check_show_question_list_process_status.deselect()
self.check_show_question_list_process_status.grid(row=18, column=3, sticky=W)
self.var_question_mark = IntVar()
self.check_question_mark = Checkbutton(self.frame2, text="", variable=self.var_question_mark, onvalue=1,
offvalue=0)
self.check_question_mark.deselect()
self.check_question_mark.grid(row=19, column=3, sticky=W)
self.var_overview_answers = IntVar()
self.check_overview_answers = Checkbutton(self.frame2, text="", variable=self.var_overview_answers, onvalue=1,
offvalue=0)
self.check_overview_answers.grid(row=21, column=3, sticky=W)
self.var_show_end_comment = IntVar()
self.check_show_end_comment = Checkbutton(self.frame2, text="", variable=self.var_show_end_comment, onvalue=1,
offvalue=0,
command=lambda
v=self.var_show_end_comment: GUI_settings_window.show_concluding_remarks(
self, v))
self.check_show_end_comment.deselect()
self.check_show_end_comment.grid(row=22, column=3, sticky=W)
self.var_forwarding = IntVar()
self.check_forwarding = Checkbutton(self.frame2, text="", variable=self.var_forwarding, onvalue=1, offvalue=0)
self.check_forwarding.deselect()
self.check_forwarding.grid(row=23, column=3, sticky=W)
self.var_notification = IntVar()
self.check_notification = Checkbutton(self.frame2, text="", variable=self.var_notification, onvalue=1,
offvalue=0)
self.check_notification.deselect()
self.check_notification.grid(row=24, column=3, sticky=W)
# --------------------------- RADIO BUTTONS ---------------------------------------
self.select_question = IntVar()
self.select_question.set(0)
self.select_question_radiobtn1 = Radiobutton(self.frame1, text="Fest definierte Fragenauswahl",
variable=self.select_question, value=0)
self.select_question_radiobtn1.grid(row=4, column=1, pady=0, sticky=W) # FIXED_QUEST_SET
self.select_question_radiobtn2 = Radiobutton(self.frame1, text="Zufällige Fragenauswahl",
variable=self.select_question, value=1)
self.select_question_radiobtn2.grid(row=5, column=1, pady=0, sticky=W) # RANDOM_QUEST_SET
self.select_question_radiobtn3 = Radiobutton(self.frame1,
text="Wiedervorlagemodus - alle Fragen eines Fragenpools",
variable=self.select_question, value=2)
self.select_question_radiobtn3.grid(row=6, column=1, pady=0, sticky=W) # DYNAMIC_QUEST_SET
self.select_anonym = IntVar()
self.select_anonym.set(0)
self.select_anonym_radiobtn1 = Radiobutton(self.frame1, text="Testergebnisse ohne Namen",
variable=self.select_anonym, value=0, borderwidth=0,
command=self.select_anonym.get())
self.select_anonym_radiobtn1.grid(row=7, column=1, pady=0, sticky=W)
self.select_anonym_radiobtn2 = Radiobutton(self.frame1, text="Testergebnisse mit Namen",
variable=self.select_anonym, value=1, borderwidth=0,
command=self.select_anonym.get())
self.select_anonym_radiobtn2.grid(row=8, column=1, pady=0, sticky=W)
self.select_show_question_title = IntVar()
self.select_show_question_title.set(0)
self.select_show_question_title_radiobtn1 = Radiobutton(self.frame2, text="Fragentitel und erreichbare Punkte",
variable=self.select_show_question_title, value=0,
borderwidth=0,
command=self.select_show_question_title.get())
self.select_show_question_title_radiobtn1.grid(row=1, column=3, pady=0, sticky=W)
self.select_show_question_title_radiobtn2 = Radiobutton(self.frame2, text="Nur Fragentitel",
variable=self.select_show_question_title, value=1,
borderwidth=0,
command=self.select_show_question_title.get())
self.select_show_question_title_radiobtn2.grid(row=2, column=3, pady=0, sticky=W)
self.select_show_question_title_radiobtn3 = Radiobutton(self.frame2,
text="Weder Fragentitel noch erreichbare Punkte",
variable=self.select_show_question_title, value=2,
borderwidth=0,
command=self.select_show_question_title.get())
self.select_show_question_title_radiobtn3.grid(row=3, column=3, pady=0, sticky=W)
self.select_user_response = IntVar()
self.select_user_response.set(0)
self.select_user_response_radiobtn1 = Radiobutton(self.frame2,
text="Antworten während des Testdurchlaufs nicht festschreiben",
variable=self.select_user_response, value=0, borderwidth=0,
command=self.select_user_response.get())
self.select_user_response_radiobtn1.grid(row=8, column=3, pady=0, sticky=W)
self.select_user_response_radiobtn2 = Radiobutton(self.frame2,
text="Antworten bei Anzeige der Rückmeldung festschreiben",
variable=self.select_user_response, value=1, borderwidth=0,
command=self.select_user_response.get())
self.select_user_response_radiobtn2.grid(row=9, column=3, pady=0, sticky=W)
self.select_user_response_radiobtn3 = Radiobutton(self.frame2,
text="Antworten bei Anzeige der Folgefrage festschreiben",
variable=self.select_user_response, value=2, borderwidth=0,
command=self.select_user_response.get())
self.select_user_response_radiobtn3.grid(row=10, column=3, pady=0, sticky=W)
self.select_user_response_radiobtn4 = Radiobutton(self.frame2,
text="Antworten mit der Anzeige von Rückmeldungen oder der Folgefrage festschreiben",
variable=self.select_user_response, value=3, borderwidth=0,
command=self.select_user_response.get())
self.select_user_response_radiobtn4.grid(row=11, column=3, pady=0, sticky=W)
self.select_not_answered_questions = IntVar()
self.select_not_answered_questions.set(0)
self.select_not_answered_questions_radiobtn1 = Radiobutton(self.frame2,
text="Nicht beantwortete Fragen bleiben an ihrem Platz",
variable=self.select_not_answered_questions, value=0,
borderwidth=0,
command=self.select_not_answered_questions.get())
self.select_not_answered_questions_radiobtn1.grid(row=16, column=3, pady=0, sticky=W)
self.select_not_answered_questions_radiobtn2 = Radiobutton(self.frame2,
text="Nicht beantwortete Fragen werden ans Testende gesschoben",
variable=self.select_not_answered_questions, value=1,
borderwidth=0,
command=self.select_not_answered_questions.get())
self.select_not_answered_questions_radiobtn2.grid(row=17, column=3, pady=0, sticky=W)
# --------------------------- ENTRY BOXES ---------------------------------------
self.titel_entry = Entry(self.frame1, width=47)
self.titel_entry.grid(row=1, column=1)
self.introduction_bar = Scrollbar(self.frame1)
self.introduction_infobox = Text(self.frame1, height=4, width=40, font=('Helvetica', 9))
self.test_start_year_entry = Entry(self.frame1, width=5)
self.test_start_year_entry.grid(row=18, column=1, sticky=W)
self.test_start_year_entry.insert(0, "YYYY")
self.test_start_month_entry = Entry(self.frame1, width=5)
self.test_start_month_entry.grid(row=18, column=1, sticky=W, padx=35)
self.test_start_month_entry.insert(0, "MM")
self.test_start_day_entry = Entry(self.frame1, width=5)
self.test_start_day_entry.grid(row=18, column=1, sticky=W, padx=70)
self.test_start_day_entry.insert(0, "DD")
self.test_start_hour_entry = Entry(self.frame1, width=5)
self.test_start_hour_entry.grid(row=18, column=1, sticky=W, padx=105)
self.test_start_hour_entry.insert(0, "HH")
self.test_start_minute_entry = Entry(self.frame1, width=5)
self.test_start_minute_entry.grid(row=18, column=1, sticky=W, padx=140)
self.test_start_minute_entry.insert(0, "mm")
self.test_end_year_entry = Entry(self.frame1, width=5)
self.test_end_year_entry.grid(row=19, column=1, sticky=W, pady=5)
self.test_end_year_entry.insert(0, "YYYY")
self.test_end_month_entry = Entry(self.frame1, width=5)
self.test_end_month_entry.grid(row=19, column=1, sticky=W, padx=35)
self.test_end_month_entry.insert(0, "MM")
self.test_end_day_entry = Entry(self.frame1, width=5)
self.test_end_day_entry.grid(row=19, column=1, sticky=W, padx=70)
self.test_end_day_entry.insert(0, "DD")
self.test_end_hour_entry = Entry(self.frame1, width=5)
self.test_end_hour_entry.grid(row=19, column=1, sticky=W, padx=105)
self.test_end_hour_entry.insert(0, "HH")
self.test_end_minute_entry = Entry(self.frame1, width=5)
self.test_end_minute_entry.grid(row=19, column=1, sticky=W, padx=140)
self.test_end_minute_entry.insert(0, "mm")
self.test_password_entry = Entry(self.frame1, width=20)
self.test_password_entry.grid(row=20, column=1, sticky=W, pady=3)
self.description_bar = Scrollbar(self.frame1)
self.description_infobox = Text(self.frame1, height=4, width=40, font=('Helvetica', 9))
self.description_bar.grid(row=2, column=2)
self.description_infobox.grid(row=2, column=1, pady=10)
self.description_bar.config(command=self.description_infobox.yview)
self.description_infobox.config(yscrollcommand=self.description_bar.set)
self.limit_users_max_amount_entry = Entry(self.frame1, width=5)
self.limit_users_max_amount_entry.grid(row=22, column=1, sticky=W)
self.inactivity_time_for_users_entry = Entry(self.frame1, width=5)
self.inactivity_time_for_users_entry.grid(row=23, column=1, sticky=W)
self.inactivity_time_for_users_entry.insert(0, "300")
self.limit_test_runs_entry = Entry(self.frame1, width=10)
self.limit_test_runs_entry.grid(row=25, column=1, sticky=W)
self.limit_test_runs_entry.insert(0, "3")
self.limit_time_betw_test_runs_month_entry = Entry(self.frame1, width=5)
self.limit_time_betw_test_runs_month_entry.grid(row=26, column=1, sticky=W, pady=5)
self.limit_time_betw_test_runs_month_entry.insert(0, "MM")
self.limit_time_betw_test_runs_day_entry = Entry(self.frame1, width=5)
self.limit_time_betw_test_runs_day_entry.grid(row=26, column=1, sticky=W, padx=35)
self.limit_time_betw_test_runs_day_entry.insert(0, "DD")
self.limit_time_betw_test_runs_hour_entry = Entry(self.frame1, width=5)
self.limit_time_betw_test_runs_hour_entry.grid(row=26, column=1, sticky=W, padx=70)
self.limit_time_betw_test_runs_hour_entry.insert(0, "HH")
self.limit_time_betw_test_runs_minute_entry = Entry(self.frame1, width=5)
self.limit_time_betw_test_runs_minute_entry.grid(row=26, column=1, sticky=W, padx=105)
self.limit_time_betw_test_runs_minute_entry.insert(0, "mm")
self.limit_processing_time_minutes_entry = Entry(self.frame1, width=5)
self.limit_processing_time_minutes_entry.grid(row=28, column=1, sticky=W)
self.limit_processing_time_minutes_entry.insert(0, "90")
self.concluding_remarks_bar = Scrollbar(self.frame2)
self.concluding_remarks_infobox = Text(self.frame2, height=4, width=40, font=('Helvetica', 9))
self.profile_name_label = Label(self.frame3, text="Choose Profilname to save")
self.profile_name_label.grid(row=0, column=0)
self.profile_name_entry = Entry(self.frame3, width=15)
self.profile_name_entry.grid(row=0, column=1)
# self.profile_oid_label = Label(self.frame3, text="Choose oid to delete")
# self.profile_oid_label.grid(row=4, column=0)
self.profile_oid_entry = Entry(self.frame3, width=10)
self.profile_oid_entry.grid(row=4, column=1)
self.load_settings_entry = Entry(self.frame3, width=10)
self.load_settings_entry.grid(row=3, column=1)
# self.delete_settings_btn = Button(self.frame3, text="Delete Profile from ID", command=GUI_settings_window.profile_save_settings(self))
# self.delete_settings_btn.grid(row=4, column=0)
self.profile_oid_listbox_label = Label(self.frame3, text=" DB\nID")
self.profile_oid_listbox_label.grid(row=1, column=4, sticky=W)
self.profile_name_listbox_label = Label(self.frame3, text="Name")
self.profile_name_listbox_label.grid(row=1, column=5, sticky=W)
self.my_listbox_profile_oid = Listbox(self.frame3, width=5)
self.my_listbox_profile_oid.grid(row=2, column=4, sticky=W)
self.my_listbox_profile_name = Listbox(self.frame3, width=15)
self.my_listbox_profile_name.grid(row=2, column=5, sticky=W)
self.show_profiles_btn = Button(self.frame3, text="Show Profile from ID",
command=lambda: GUI_settings_window.profile_show_db(self))
self.show_profiles_btn.grid(row=5, column=0)
self.save_settings_btn = Button(self.frame3, text="Save Settings",
command=lambda: GUI_settings_window.profile_save_settings(self))
self.save_settings_btn.grid(row=2, column=0)
self.load_settings_btn = Button(self.frame3, text="Load Settings",
command=lambda: GUI_settings_window.profile_load_settings(self))
self.load_settings_btn.grid(row=3, column=0)
self.delete_profile_btn = Button(self.frame3, text="Delete Profile",
command=lambda: GUI_settings_window.profile_delete(self))
self.delete_profile_btn.grid(row=4, column=0)
self.create_profile_btn = Button(self.frame3, text="Create Profile-Settings",
command=lambda: GUI_settings_window.create_settings(self))
self.create_profile_btn.grid(row=6, column=0)
def show_entry_time_limited_start(self, var):
if var.get() == 0:
self.time_limited_start_label.grid_forget()
self.time_limited_start_year_label.grid_forget()
self.time_limited_start_year_entry.grid_forget()
self.time_limited_start_month_label.grid_forget()
self.time_limited_start_month_entry.grid_forget()
self.time_limited_start_day_label.grid_forget()
self.time_limited_start_day_entry.grid_forget()
self.time_limited_start_hour_label.grid_forget()
self.time_limited_start_hour_entry.grid_forget()
self.time_limited_start_minute_label.grid_forget()
self.time_limited_start_minute_entry.grid_forget()
self.time_limited_end_label.grid_forget()
self.time_limited_end_year_label.grid_forget()
self.time_limited_end_year_entry.grid_forget()
self.time_limited_end_month_label.grid_forget()
self.time_limited_end_month_entry.grid_forget()
self.time_limited_end_day_label.grid_forget()
self.time_limited_end_day_entry.grid_forget()
self.time_limited_end_hour_label.grid_forget()
self.time_limited_end_hour_entry.grid_forget()
self.time_limited_end_minute_label.grid_forget()
self.time_limited_end_minute_entry.grid_forget()
else:
self.time_limited_start_label.grid(row=10, column=1, sticky=W, padx=50)
self.time_limited_start_day_label.grid(row=11, column=1, sticky=W, padx=30)
self.time_limited_start_month_label.grid(row=11, column=1, sticky=W, padx=55)
self.time_limited_start_year_label.grid(row=11, column=1, sticky=W, padx=80)
self.time_limited_start_hour_label.grid(row=11, column=1, sticky=W, padx=110)
self.time_limited_start_minute_label.grid(row=11, column=1, sticky=W, padx=135)
self.time_limited_end_label.grid(row=10, column=1, sticky=E, padx=50)
self.time_limited_end_day_label.grid(row=11, column=1, sticky=E, padx=110)
self.time_limited_end_month_label.grid(row=11, column=1, sticky=E, padx=85)
self.time_limited_end_year_label.grid(row=11, column=1, sticky=E, padx=55)
self.time_limited_end_hour_label.grid(row=11, column=1, sticky=E, padx=30)
self.time_limited_end_minute_label.grid(row=11, column=1, sticky=E, padx=5)
self.time_limited_start_day_entry.grid(row=12, column=1, sticky=W, padx=30)
self.time_limited_start_month_entry.grid(row=12, column=1, sticky=W, padx=55)
self.time_limited_start_year_entry.grid(row=12, column=1, sticky=W, padx=80)
self.time_limited_start_hour_entry.grid(row=12, column=1, sticky=W, padx=110)
self.time_limited_start_minute_entry.grid(row=12, column=1, sticky=W, padx=135)
self.time_limited_end_day_entry.grid(row=12, column=1, sticky=E, padx=110)
self.time_limited_end_month_entry.grid(row=12, column=1, sticky=E, padx=85)
self.time_limited_end_year_entry.grid(row=12, column=1, sticky=E, padx=55)
self.time_limited_end_hour_entry.grid(row=12, column=1, sticky=E, padx=30)
self.time_limited_end_minute_entry.grid(row=12, column=1, sticky=E, padx=5)
def show_introduction_textfield(self, introduction_var):
print(introduction_var.get())
if introduction_var.get() == 0:
self.introduction_bar.grid_forget()
self.introduction_infobox.grid_forget()
else:
self.introduction_bar.grid(row=15, column=1, sticky=E)
self.introduction_infobox.grid(row=15, column=1, padx=30)
self.introduction_bar.config(command=self.introduction_infobox.yview)
self.introduction_infobox.config(yscrollcommand=self.introduction_bar.set)
def enable_autosave(self, var):
if var.get() == 0:
self.check_autosave_interval_entry.grid_forget()
self.check_autosave_interval_label.grid_forget()
else:
self.check_autosave_interval_entry.grid(row=4, column=3, padx=10)
self.check_autosave_interval_label.grid(row=4, column=3, padx=50, sticky=W)
def show_concluding_remarks(self, var):
if var.get() == 0:
self.concluding_remarks_bar.grid_forget()
self.concluding_remarks_infobox.grid_forget()
else:
self.concluding_remarks_bar.grid(row=22, column=3, sticky=E)
self.concluding_remarks_infobox.grid(row=22, column=3, padx=30)
self.concluding_remarks_bar.config(command=self.concluding_remarks_infobox.yview)
self.concluding_remarks_infobox.config(yscrollcommand=self.concluding_remarks_bar.set)
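# The toggle handlers above share one Tkinter pattern: grid_forget() hides a
# widget without destroying it, and a later grid() call restores it in place.
# A minimal self-contained sketch of that pattern (the widget names below are
# invented for illustration, not part of this class):
def _demo_toggle_widget():
    from tkinter import Tk, Checkbutton, IntVar, Text
    root = Tk()
    var = IntVar()
    box = Text(root, height=4, width=40)
    def toggle():
        if var.get() == 0:
            box.grid_forget()  # hide; the widget and its contents are preserved
        else:
            box.grid(row=1, column=0)  # show again at the same grid cell
    Checkbutton(root, text="Show notes", variable=var, command=toggle).grid(row=0, column=0)
    root.mainloop()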
def profile_show_db(self):
conn = sqlite3.connect('test_settings_profiles_db.db')
c = conn.cursor()
c.execute("SELECT *, oid FROM my_profiles_table")
profile_records = c.fetchall()
# Clear List Boxes
self.my_listbox_profile_name.delete(0, END)
self.my_listbox_profile_oid.delete(0, END)
# Loop thru Results
for profile_record in profile_records:
self.my_listbox_profile_name.insert(END, profile_record[0])
self.my_listbox_profile_oid.insert(END, profile_record[len(profile_record) - 1])
self.profile_records_len = len(profile_records)
# print(profile_records[len(profile_records)-1])
conn.commit()
conn.close()
print("LOOP THROUGH... SHOW PROFILES!")
def profile_save_settings(self):
conn = sqlite3.connect('test_settings_profiles_db.db')
c = conn.cursor()
# Insert into Table
c.execute(
"INSERT INTO my_profiles_table VALUES ("
":profile_name, :entry_description, :radio_select_question, :radio_select_anonymous, :check_online, :check_time_limited, "
":check_introduction, :entry_introduction, :check_test_properties, "
":entry_test_start_year, :entry_test_start_month, :entry_test_start_day, :entry_test_start_hour, :entry_test_start_minute,"
":entry_test_end_year, :entry_test_end_month, :entry_test_end_day, :entry_test_end_hour, :entry_test_end_minute,"
":entry_test_password, :check_specific_users, :entry_limit_users, :entry_user_inactivity, :entry_limit_test_runs,"
":entry_limit_time_betw_test_run_month, :entry_limit_time_betw_test_run_day, :entry_limit_time_betw_test_run_hour, :entry_limit_time_betw_test_run_minute,"
":check_processing_time, :entry_processing_time_in_minutes, :check_processing_time_reset,"
":check_examview, :check_examview_titel, :check_examview_username, :check_show_ilias_nr,"
":radio_select_show_question_title, :check_autosave, :entry_autosave_interval, :check_mix_questions, :check_show_solution_notes, :check_direct_response,"
":radio_select_user_response, :check_mandatory_questions, :check_use_previous_solution, :check_show_test_cancel, :radio_select_not_answered_questions,"
":check_show_question_list_process_status, :check_question_mark, :check_overview_answers, :check_show_end_comment, :entry_end_comment, :check_forwarding, :check_notification)",
{
'profile_name': self.profile_name_entry.get(),
'entry_description': self.description_infobox.get("1.0", 'end-1c'),
'radio_select_question': self.select_question.get(),
'radio_select_anonymous': self.select_anonym.get(),
'check_online': self.var_online.get(),
'check_time_limited': self.var_time_limited.get(),
'check_introduction': self.var_introduction.get(),
'entry_introduction': self.introduction_infobox.get("1.0", 'end-1c'),
'check_test_properties': self.var_test_prop.get(),
'entry_test_start_year': self.test_start_year_entry.get(),
'entry_test_start_month': self.test_start_month_entry.get(),
'entry_test_start_day': self.test_start_day_entry.get(),
'entry_test_start_hour': self.test_start_hour_entry.get(),
'entry_test_start_minute': self.test_start_minute_entry.get(),
'entry_test_end_year': self.test_end_year_entry.get(),
'entry_test_end_month': self.test_end_month_entry.get(),
'entry_test_end_day': self.test_end_day_entry.get(),
'entry_test_end_hour': self.test_end_hour_entry.get(),
'entry_test_end_minute': self.test_end_minute_entry.get(),
'entry_test_password': self.test_password_entry.get(),
'check_specific_users': self.var_specific_users.get(),
'entry_limit_users': self.limit_users_max_amount_entry.get(),
'entry_user_inactivity': self.inactivity_time_for_users_entry.get(),
'entry_limit_test_runs': self.limit_test_runs_entry.get(),
'entry_limit_time_betw_test_run_month': self.limit_time_betw_test_runs_month_entry.get(),
'entry_limit_time_betw_test_run_day': self.limit_time_betw_test_runs_day_entry.get(),
'entry_limit_time_betw_test_run_hour': self.limit_time_betw_test_runs_hour_entry.get(),
'entry_limit_time_betw_test_run_minute': self.limit_time_betw_test_runs_minute_entry.get(),
'check_processing_time': self.var_processing_time.get(),
'entry_processing_time_in_minutes': self.limit_processing_time_minutes_entry.get(),
'check_processing_time_reset': self.var_processing_time_reset.get(),
'check_examview': self.var_examview.get(),
'check_examview_titel': self.var_examview_test_title.get(),
'check_examview_username': self.var_examview_user_name.get(),
'check_show_ilias_nr': self.var_show_ilias_nr.get(),
'radio_select_show_question_title': self.select_show_question_title.get(),
'check_autosave': self.var_autosave.get(),
'entry_autosave_interval': self.check_autosave_interval_entry.get(),
'check_mix_questions': self.var_mix_questions.get(),
'check_show_solution_notes': self.var_show_solution_notes.get(),
'check_direct_response': self.var_direct_response.get(),
'radio_select_user_response': self.select_user_response.get(),
'check_mandatory_questions': self.var_mandatory_questions.get(),
'check_use_previous_solution': self.var_use_previous_solution.get(),
'check_show_test_cancel': self.var_show_test_cancel.get(),
'radio_select_not_answered_questions': self.select_not_answered_questions.get(),
'check_show_question_list_process_status': self.var_show_question_list_process_status.get(),
'check_question_mark': self.var_question_mark.get(),
'check_overview_answers': self.var_overview_answers.get(),
'check_show_end_comment': self.var_show_end_comment.get(),
'entry_end_comment': self.concluding_remarks_infobox.get("1.0", 'end-1c'),
'check_forwarding': self.var_forwarding.get(),
'check_notification': self.var_notification.get()
}
)
conn.commit()
conn.close()
print("GOT VALUES")
def profile_load_settings(self):
print("LOAD")
conn = sqlite3.connect('test_settings_profiles_db.db')
c = conn.cursor()
c.execute("SELECT * FROM my_profiles_table WHERE oid =" + self.load_settings_entry.get())
profile_records = c.fetchall()
# Loop thru Results
for profile_record in profile_records:
self.profile_name_entry.get()
# profil_name_entry -> profile_record[0]
self.description_infobox.delete('1.0', END)
self.description_infobox.insert('1.0', profile_record[1])
self.select_question.set(profile_record[2])
self.select_anonym.set(profile_record[3])
self.var_online.set(profile_record[4])
self.var_time_limited.set(profile_record[5])
self.var_introduction.set(profile_record[6])
self.introduction_infobox.delete('1.0', END)
self.introduction_infobox.insert('1.0', profile_record[7])
self.var_test_prop.set(profile_record[8])
self.test_start_year_entry.delete(0, END)
self.test_start_year_entry.insert(0, profile_record[9])
self.test_start_month_entry.delete(0, END)
self.test_start_month_entry.insert(0, profile_record[10])
self.test_start_day_entry.delete(0, END)
self.test_start_day_entry.insert(0, profile_record[11])
self.test_start_hour_entry.delete(0, END)
self.test_start_hour_entry.insert(0, profile_record[12])
self.test_start_minute_entry.delete(0, END)
self.test_start_minute_entry.insert(0, profile_record[13])
self.test_end_year_entry.delete(0, END)
self.test_end_year_entry.insert(0, profile_record[14])
self.test_end_month_entry.delete(0, END)
self.test_end_month_entry.insert(0, profile_record[15])
self.test_end_day_entry.delete(0, END)
self.test_end_day_entry.insert(0, profile_record[16])
self.test_end_hour_entry.delete(0, END)
self.test_end_hour_entry.insert(0, profile_record[17])
self.test_end_minute_entry.delete(0, END)
self.test_end_minute_entry.insert(0, profile_record[18])
self.test_password_entry.delete(0, END)
self.test_password_entry.insert(0, profile_record[19])
self.var_specific_users.set(profile_record[20])
self.limit_users_max_amount_entry.delete(0, END)
self.limit_users_max_amount_entry.insert(0, profile_record[21])
self.inactivity_time_for_users_entry.delete(0, END)
self.inactivity_time_for_users_entry.insert(0, profile_record[22])
self.limit_test_runs_entry.delete(0, END)
self.limit_test_runs_entry.insert(0, profile_record[23])
self.limit_time_betw_test_runs_month_entry.delete(0, END)
self.limit_time_betw_test_runs_month_entry.insert(0, profile_record[24])
self.limit_time_betw_test_runs_day_entry.delete(0, END)
self.limit_time_betw_test_runs_day_entry.insert(0, profile_record[25])
self.limit_time_betw_test_runs_hour_entry.delete(0, END)
self.limit_time_betw_test_runs_hour_entry.insert(0, profile_record[26])
self.limit_time_betw_test_runs_minute_entry.delete(0, END)
self.limit_time_betw_test_runs_minute_entry.insert(0, profile_record[27])
self.var_processing_time.set(profile_record[28])
self.limit_processing_time_minutes_entry.delete(0, END)
self.limit_processing_time_minutes_entry.insert(0, profile_record[29])
self.var_processing_time_reset.set(profile_record[30])
self.var_examview.set(profile_record[31])
self.var_examview_test_title.set(profile_record[32])
self.var_examview_user_name.set(profile_record[33])
self.var_show_ilias_nr.set(profile_record[34])
self.select_show_question_title.set(profile_record[35])
self.var_autosave.set(profile_record[36])
self.check_autosave_interval_entry.delete(0, END)
self.check_autosave_interval_entry.insert(0, profile_record[37])
self.var_mix_questions.set(profile_record[38])
self.var_show_solution_notes.set(profile_record[39])
self.var_direct_response.set(profile_record[40])
self.select_user_response.set(profile_record[41])
self.var_mandatory_questions.set(profile_record[42])
self.var_use_previous_solution.set(profile_record[43])
self.var_show_test_cancel.set(profile_record[44])
self.select_not_answered_questions.set(profile_record[45])
self.var_show_question_list_process_status.set(profile_record[46])
self.var_question_mark.set(profile_record[47])
self.var_overview_answers.set(profile_record[48])
self.var_show_end_comment.set(profile_record[49])
self.concluding_remarks_infobox.delete('1.0', END)
self.concluding_remarks_infobox.insert('1.0', profile_record[50])
self.var_forwarding.set(profile_record[51])
self.var_notification.set(profile_record[52])
conn.commit()
conn.close()
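# The SELECT above splices the entry text directly into the SQL string, which
# only works while the field really holds a number. The usual safer form is a
# "?" placeholder, which lets sqlite3 bind the value and avoids SQL injection
# (illustrative sketch, not the code that runs here):
def _demo_parameterized_select(cursor, oid_text):
    cursor.execute("SELECT * FROM my_profiles_table WHERE oid = ?", (oid_text,))
    return cursor.fetchall()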
def profile_delete(self):
conn = sqlite3.connect('test_settings_profiles_db.db')
c = conn.cursor()
c.execute("DELETE from my_profiles_table WHERE oid= " + self.profile_oid_entry.get())
# self.profile_oid_entry(0, END)
conn.commit()
conn.close()
def profile_delete_last(self):
conn = sqlite3.connect('test_settings_profiles_db.db')
c = conn.cursor()
self.profile_oid_entry.insert(0, self.profile_records_len)
c.execute("DELETE from my_profiles_table WHERE oid= " + self.profile_oid_entry.get())
print("LAST DB ENTRY DELETED")
# self.profile_oid_entry(0, END)
conn.commit()
conn.close()
# For create test settings --> Toplevel must be opened (Test-Settings Window)
def create_settings(self):
try:
# profile_name --> profile_record[0]_
self.description = self.description_infobox.get('1.0', 'end-1c')
self.question_type = self.select_question.get()
self.anonym = self.select_anonym.get()
self.online = self.var_online.get()
self.time_limited = self.var_time_limited.get()
self.introduction_check = self.var_introduction.get()
self.introduction_text = self.introduction_infobox.get('1.0', 'end-1c')
self.test_prop = self.var_test_prop.get()
self.test_start_year = self.test_start_year_entry.get()
self.test_start_month = self.test_start_month_entry.get()
self.test_start_day = self.test_start_day_entry.get()
self.test_start_hour = self.test_start_hour_entry.get()
self.test_start_minute = self.test_start_minute_entry.get()
self.test_end_year = self.test_end_year_entry.get()
self.test_end_month = self.test_end_month_entry.get()
self.test_end_day = self.test_end_day_entry.get()
self.test_end_hour = self.test_end_hour_entry.get()
self.test_end_minute = self.test_end_minute_entry.get()
self.test_password = self.test_password_entry.get()
self.specific_users = self.var_specific_users.get()
self.limit_users_max = self.limit_users_max_amount_entry.get()
self.inactivity_time_for_users = self.inactivity_time_for_users_entry.get()
self.limit_test_runs = self.limit_test_runs_entry.get()
self.limit_time_betw_test_runs_month = self.limit_time_betw_test_runs_month_entry.get()
self.limit_time_betw_test_runs_day = self.limit_time_betw_test_runs_day_entry.get()
self.limit_time_betw_test_runs_hour = self.limit_time_betw_test_runs_hour_entry.get()
self.limit_time_betw_test_runs_minute = self.limit_time_betw_test_runs_minute_entry.get()
self.processing_time = self.var_processing_time.get()
self.limit_processing_time_minutes = self.limit_processing_time_minutes_entry.get()
self.check_processing_time_reset = self.var_processing_time_reset.get()
self.examview = self.var_examview.get()
self.examview_test_title = self.var_examview_test_title.get()
self.examview_user_name = self.var_examview_user_name.get()
self.show_ilias_nr = self.var_show_ilias_nr.get()
self.show_question_title = self.select_show_question_title.get()
self.autosave = self.var_autosave.get()
self.autosave_interval = self.check_autosave_interval_entry.get()
self.mix_questions = self.var_mix_questions.get()
self.show_solution_notes = self.var_show_solution_notes.get()
self.direct_response = self.var_direct_response.get()
self.user_response = self.select_user_response.get()
self.mandatory_questions = self.var_mandatory_questions.get()
self.use_previous_solution = self.var_use_previous_solution.get()
self.show_test_cancel = self.var_show_test_cancel.get()
self.not_answered_questions = self.select_not_answered_questions.get()
self.show_question_list_process_status = self.var_show_question_list_process_status.get()
self.question_mark = self.var_question_mark.get()
self.overview_answers = self.var_overview_answers.get()
self.show_end_comment = self.var_show_end_comment.get()
self.concluding_remarks = self.concluding_remarks_infobox.get("1.0", 'end-1c')
self.forwarding = self.var_forwarding.get()
self.notification = self.var_notification.get()
self.mytree = ET.parse(self.qti_file_path_write)
self.myroot = self.mytree.getroot()
# hours_from_minutes = str(datetime.timedelta(minutes=int(self.limit_processing_time_minutes)))
self.duration_time = int(self.limit_processing_time_minutes)
self.duration_time_hours = self.duration_time // 60
self.duration_time_minutes = self.duration_time % 60
# Format of duration: P0Y0M0DT1H30M0S
self.duration = "P0Y0M0DT" + str(self.duration_time_hours) + "H" + str(self.duration_time_minutes) + "M0S"
for qticomment in self.myroot.iter('qticomment'):
qticomment.text = self.description
break
for duration in self.myroot.iter('duration'):
duration.text = self.duration
break
questestinterop = ET.Element('questestinterop')
assessment = ET.SubElement(questestinterop, 'assessment')
qticomment = ET.SubElement(assessment, 'qticomment')
qticomment.text = self.description
for qtimetadatafield in self.myroot.iter('qtimetadatafield'):
if qtimetadatafield.find('fieldlabel').text == "anonymity":
qtimetadatafield.find('fieldentry').text = self.anonym
if self.anonym == "":
qtimetadatafield.find('fieldentry').text = "0"
print("NO ENTRY IN <ANONYM>")
if qtimetadatafield.find('fieldlabel').text == "question_set_type":
if self.question_type == 0:
qtimetadatafield.find('fieldentry').text = "FIXED_QUEST_SET"
# print("WRITE FIXED-Question")
elif self.question_type == 1:
qtimetadatafield.find('fieldentry').text = "RANDOM_QUEST_SET"
# print("WRITE RANDOM-Question")
elif self.question_type == 2:
qtimetadatafield.find('fieldentry').text = "DYNAMIC_QUEST_SET"
# print("WRITE DYNAMIC-Question")
else:
qtimetadatafield.find('fieldentry').text = "FIXED_QUEST_SET"
print("NO ENTRY IN <QUESTION_TYPE> ")
# if qtimetadatafield.find('fieldlabel').text == "author":
# qtimetadatafield.find('fieldentry').text = str(Formelfrage.autor_entry.get())
if qtimetadatafield.find('fieldlabel').text == "reset_processing_time":
qtimetadatafield.find('fieldentry').text = str(self.check_processing_time_reset)
if self.check_processing_time_reset == "":
qtimetadatafield.find('fieldentry').text = "0"
print("NO ENTRY IN <RESET PROCESSING TIME>")
if qtimetadatafield.find('fieldlabel').text == "password":
qtimetadatafield.find('fieldentry').text = str(self.test_password)
if qtimetadatafield.find('fieldlabel').text == "allowedUsers":
qtimetadatafield.find('fieldentry').text = str(self.limit_users_max)
if qtimetadatafield.find('fieldlabel').text == "allowedUsersTimeGap":
qtimetadatafield.find('fieldentry').text = str(self.inactivity_time_for_users)
if qtimetadatafield.find('fieldlabel').text == "nr_of_tries":
qtimetadatafield.find('fieldentry').text = str(self.limit_test_runs)
if qtimetadatafield.find('fieldlabel').text == "pass_waiting":
qtimetadatafield.find('fieldentry').text = str(self.limit_time_betw_test_runs_month) + ":0" + str(
self.limit_time_betw_test_runs_day) + ":" + str(
self.limit_time_betw_test_runs_hour) + ":" + str(self.limit_time_betw_test_runs_minute) + ":00"
if self.limit_time_betw_test_runs_month == "MM":
qtimetadatafield.find('fieldentry').text = "00:000:00:00:00"
print(
" >WARNING< NO limit_time_betw_test_runs SET.. --> set limit_time to \"00:000:00:00:00\" ")
# Exam view ("kiosk" mode): all three checks (title + name) = "7" / two checks (title) = "3" / two checks (name) = "5" / one check = "1" / "0" -> disabled
if qtimetadatafield.find('fieldlabel').text == "kiosk":
# Test the most specific combinations first so the plain "examview == 1"
# branch does not shadow them and the values "3", "5" and "7" can be written.
if self.examview == 1 and self.examview_user_name == 1 and self.examview_test_title == 1:
qtimetadatafield.find('fieldentry').text = "7"
elif self.examview == 1 and self.examview_test_title == 1:
qtimetadatafield.find('fieldentry').text = "3"
elif self.examview == 1 and self.examview_user_name == 1:
qtimetadatafield.find('fieldentry').text = "5"
elif self.examview == 1:
qtimetadatafield.find('fieldentry').text = "1"
else:
qtimetadatafield.find('fieldentry').text = "0"
# if qtimetadatafield.find('fieldlabel').text == "use_previous_answers":
# qtimetadatafield.find('fieldentry').text = "0"
# if qtimetadatafield.find('fieldlabel').text == "title_output":
# qtimetadatafield.find('fieldentry').text = "0"
# if qtimetadatafield.find('fieldlabel').text == "examid_in_test_pass":
# qtimetadatafield.find('fieldentry').text = "0"
# if qtimetadatafield.find('fieldlabel').text == "show_summary":
# qtimetadatafield.find('fieldentry').text = "0"
if qtimetadatafield.find('fieldlabel').text == "show_cancel":
qtimetadatafield.find('fieldentry').text = str(self.show_test_cancel)
# if qtimetadatafield.find('fieldlabel').text == "show_marker":
# qtimetadatafield.find('fieldentry').text = "99"
# if qtimetadatafield.find('fieldlabel').text == "fixed_participants":
# qtimetadatafield.find('fieldentry').text = "99"
# if qtimetadatafield.find('fieldlabel').text == "showinfo":
# qtimetadatafield.find('fieldentry').text = "99"
if qtimetadatafield.find('fieldlabel').text == "shuffle_questions":
qtimetadatafield.find('fieldentry').text = str(self.mix_questions)
if qtimetadatafield.find('fieldlabel').text == "processing_time":
# self.minutes = self.limit_processing_time_minutes
hours_from_minutes = str(datetime.timedelta(minutes=int(self.limit_processing_time_minutes)))
print("len_min_to_hours: " + str(hours_from_minutes))
qtimetadatafield.find('fieldentry').text = "0" + hours_from_minutes
if qtimetadatafield.find('fieldlabel').text == "enable_examview":
qtimetadatafield.find('fieldentry').text = str(self.examview)
# if qtimetadatafield.find('fieldlabel').text == "show_examview_pdf":
# qtimetadatafield.find('fieldentry').text = "99"
if qtimetadatafield.find('fieldlabel').text == "starting_time":
qtimetadatafield.find('fieldentry').text = "P" + str(self.test_start_year) + "Y" + str(
self.test_start_month) + "M" + str(self.test_start_day) + "DT" + str(
self.test_start_hour) + "H" + str(self.test_start_minute) + "M" + "0S"
if self.test_start_year == "YYYY":
qtimetadatafield.find('fieldentry').text = "P2020Y1M1DT00H0M0S"
print(" >WARNING< NO STARTING TIME SET.. --> set START to \"P2020Y1M1DT00H0M0S\"")
if qtimetadatafield.find('fieldlabel').text == "ending_time":
qtimetadatafield.find('fieldentry').text = "P" + str(self.test_end_year) + "Y" + str(
self.test_end_month) + "M" + str(self.test_end_day) + "DT" + str(
self.test_end_hour) + "H" + str(self.test_end_minute) + "M" + "0S"
if self.test_end_year == "YYYY":
qtimetadatafield.find('fieldentry').text = "P2020Y12M30DT00H0M0S"
print(" >WARNING< NO ENDING TIME SET.. --> set END to \"P2020Y12M30DT00H0M0S\"")
if qtimetadatafield.find('fieldlabel').text == "autosave":
qtimetadatafield.find('fieldentry').text = str(self.autosave)
if qtimetadatafield.find('fieldlabel').text == "autosave_ival":
qtimetadatafield.find('fieldentry').text = str(self.autosave_interval)
# if qtimetadatafield.find('fieldlabel').text == "offer_question_hints":
# qtimetadatafield.find('fieldentry').text = "99"
# if qtimetadatafield.find('fieldlabel').text == "obligations_enabled":
# qtimetadatafield.find('fieldentry').text = "99"
if qtimetadatafield.find('fieldlabel').text == "enable_processing_time":
qtimetadatafield.find('fieldentry').text = str(self.processing_time)
# if qtimetadatafield.find('fieldlabel').text == "mark_step_0":
# qtimetadatafield.find('fieldentry').text = "99"
# if qtimetadatafield.find('fieldlabel').text == "mark_step_1":
# qtimetadatafield.find('fieldentry').text = "99"
# tree = ET.ElementTree(questestinterop)
# tree.write("WORKED_neuerAnfang.xml")
print("Write Test_Settings to File")
self.mytree.write(self.qti_file_path_write)
print("Create Test WITH Test_settings")
except Exception:
e = sys.exc_info()[0]
print('\033[91m' + "Error: %s" % e + '\033[0m')
print(
'\033[91m' + "To use Test-Settings properly, the \"Test_settings\"-window must be opened when creating the question" + '\033[0m')
"""
# <------------ CREATE FORMELFRAGE TEST ----------->
class Create_Formelfrage_Test(Formelfrage):
def __init__(self, entry_to_index_dict):
self.ff_db_entry_to_index_dict = entry_to_index_dict
test_generator_modul_ilias_test_struktur.Create_ILIAS_Test.__init__(self,
self.ff_db_entry_to_index_dict,
self.formelfrage_test_tst_file_path_template,
self.formelfrage_test_tst_file_path_output,
self.formelfrage_test_qti_file_path_template,
self.formelfrage_test_qti_file_path_output,
self.ff_ilias_test_title_entry.get(),
self.create_formelfrage_test_entry.get(),
self.ff_question_type_entry.get(),
)
# <------------ CREATE FORMELFRAGE POOL ----------->
class Create_Formelfrage_Pool(Formelfrage):
def __init__(self, entry_to_index_dict, var_create_all_questions, var_create_multiple_question_pools_from_tax, var_calculate_value_range_for_pool_ids):
self.ff_entry_to_index_dict = entry_to_index_dict
self.ff_var_create_question_pool_all = var_create_all_questions
self.var_create_multiple_question_pools_from_tax = var_create_multiple_question_pools_from_tax
self.ff_pool_entry = self.create_formelfrage_pool_entry.get()
self.taxonomy_collection_no_dublicates = []
self.pool_number_list = []
self.taxonomy_number_list = []
self.directory_number_list = []
self.oid_number_list_temp = []
self.oid_number_list = []
# Calculate the value range for the question pool entries
if var_calculate_value_range_for_pool_ids == 1:
print("Wertebereich für Pool-IDs berechnen")
Formelfrage.ff_calculate_value_range_function_in_GUI(self, self.ff_pool_entry)
# "Normalerweise" wird nur ein Fragenpool erstellt
# Wenn mehrere Fragenpools "nach Taxonomie getrennt" erstellt werden sollen, wird "self.number_of_pool"
# auf die Anzahl der Taxonomien gesetzt
self.number_of_pools = 1
# Wenn "nach Taxonomie getrennte Fragenpools" == 1:
if self.ff_var_create_multiple_question_pools_from_tax_check.get() == 1:
self.tax_entries_from_db_list = []
self.oid_entries_from_db_list = []
self.tax_and_oid_entries_from_db_list = []
self.ids_with_same_tax_list = []
self.list_of_lists = []
# Connect to the database
conn = sqlite3.connect(self.database_formelfrage_path)
c = conn.cursor()
c.execute("SELECT *, oid FROM %s" % self.ff_database_table)
ff_db_records = c.fetchall()
# Take all entries from the DB
if self.ff_var_create_question_pool_all == 1:
for ff_db_record in ff_db_records:
self.oid_entries_from_db_list.append(int(ff_db_record[len(ff_db_record) - 1]))
self.tax_entries_from_db_list.append(ff_db_record[self.ff_db_entry_to_index_dict['question_pool_tag']])
self.oid_entries_from_db_list.pop(0)
self.tax_entries_from_db_list.pop(0)
# Take the IDs from the input field
else:
self.ff_pool_entry_list = []
self.ff_pool_entry_list = self.ff_pool_entry.split(',')
for ff_db_record in ff_db_records:
if str(ff_db_record[len(ff_db_record) - 1]) in self.ff_pool_entry_list:
self.oid_entries_from_db_list.append(int(ff_db_record[len(ff_db_record) - 1]))
self.tax_entries_from_db_list.append(ff_db_record[self.ff_db_entry_to_index_dict['question_pool_tag']])
# Merge the two lists into [oid, taxonomy] pairs
for i in range(len(self.oid_entries_from_db_list)):
self.tax_and_oid_entries_from_db_list.append([self.oid_entries_from_db_list[i], self.tax_entries_from_db_list[i]])
# Taxonomy of the questions (without duplicate entries)
self.taxonomy_collection_no_dublicates = list(dict.fromkeys(self.tax_entries_from_db_list))
new_list = []
# Read the first field (tax_id).
# Example format of "self.tax_and_oid_entries_from_db_list" = [[2, '1'], [3, '2'], [4, '2'], [5, '3'], [6, '3']]
# The taxonomies are declared here as '1', '2', '3'
# In this example the tax_id is self.id_temp = '1'
self.id_temp = self.tax_and_oid_entries_from_db_list[0][1]
#new_list.append(self.tax_and_oid_entries_from_db_list[0][0])
for k in range(len(self.tax_and_oid_entries_from_db_list)):
#self.id_temp = self.tax_and_oid_entries_from_db_list[k][1]
#print(self.id_temp, k)
if self.tax_and_oid_entries_from_db_list[k][1] == self.id_temp:
new_list.append(self.tax_and_oid_entries_from_db_list[k][0])
else:
self.list_of_lists.append(new_list)
new_list = []
new_list.append(self.tax_and_oid_entries_from_db_list[k][0])
self.id_temp = self.tax_and_oid_entries_from_db_list[k][1]
# new_list is only appended to list_of_lists when the taxonomies differ;
# since the last entries can share a taxonomy, the append has to be run once more manually
self.list_of_lists.append(new_list)
self.number_of_pools = len(self.list_of_lists)
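# The manual grouping above collects consecutive question ids that share a
# taxonomy into one sub-list per taxonomy. Assuming the pairs are sorted by
# taxonomy (as in the example format quoted above), the same result can be
# sketched with itertools.groupby (illustrative alternative, not the code
# that runs here):
def _demo_group_ids_by_taxonomy(tax_and_oid_entries):
    # e.g. [[2, '1'], [3, '2'], [4, '2'], [5, '3'], [6, '3']] -> [[2], [3, 4], [5, 6]]
    from itertools import groupby
    return [[oid for oid, _tax in group]
            for _key, group in groupby(tax_and_oid_entries, key=lambda pair: pair[1])]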
# This __init__ runs when the "ILIAS-Fragenpool erstellen" button is pressed.
# XML files and folders are created with an ascending ID.
for pool_number in range(self.number_of_pools):
if self.var_create_multiple_question_pools_from_tax == 1:
self.string_entry = ','.join(map(str, self.list_of_lists[pool_number]))
self.ff_pool_entry = self.string_entry
self.ilias_id_pool_img_dir, self.ilias_id_pool_qpl_dir, self.pool_qti_file_path_output, self.pool_qpl_file_path_output, self.ilias_id_pool_qti_xml, self.file_max_id, self.taxonomy_file_question_pool = test_generator_modul_ilias_test_struktur.Create_ILIAS_Pool.__init__(
self, self.project_root_path, self.formelfrage_pool_directory_output,
self.formelfrage_files_path_pool_output, self.formelfrage_pool_qti_file_path_template,
self.ff_ilias_test_title_entry.get(), self.ff_pool_entry, self.ff_question_type_name,
self.database_formelfrage_path, self.ff_database_table, self.ff_db_entry_to_index_dict,
self.ff_var_create_question_pool_all)
print("POOL ENTRY: " + str(self.ff_pool_entry))
# Collect variables for the console output
self.pool_number_list.append(pool_number)
self.directory_number_list.append(self.ilias_id_pool_qpl_dir)
self.oid_number_list_temp = self.ff_pool_entry.split(',')
self.oid_number_list.append(len(self.oid_number_list_temp))
# Create the Formelfrage questions
Create_Formelfrage_Questions.__init__(self,
self.ff_db_entry_to_index_dict,
self.ff_pool_entry,
"question_pool",
self.ilias_id_pool_img_dir,
self.ilias_id_pool_qpl_dir,
self.formelfrage_pool_qti_file_path_template,
self.pool_qti_file_path_output,
self.pool_qpl_file_path_output,
self.ilias_id_pool_qti_xml,
self.file_max_id,
self.taxonomy_file_question_pool)
# In the generated XML file, "&" has to be replaced with "&amp;"
test_generator_modul_ilias_test_struktur.Additional_Funtions.replace_character_in_xml_file(self, self.pool_qti_file_path_output)
# Taxonomies are not used for the created pools
if self.ff_var_remove_pool_tags_for_tax_check.get() == 1:
# The taxonomy of the question pool is edited/configured here
test_generator_modul_taxonomie_und_textformatierung.Taxonomie.create_taxonomy_for_pool(self,
self.ff_pool_entry,
self.ff_var_create_question_pool_all,
self.database_formelfrage_path,
"formelfrage_table",
self.ff_entry_to_index_dict,
self.taxonomy_file_question_pool,
self.pool_qti_file_path_output,
pool_number,
self.number_of_pools
)
# Completed question pool stored
print("______________________________________________________________________")
print(" ---> Fragenpool im Ordner \"" + self.ilias_id_pool_qpl_dir + "\" erstellt! ")
string_collection = ""
if self.var_create_multiple_question_pools_from_tax == 1:
for i in range(len(self.pool_number_list)):
string_collection += "Fragenpool: " + str(self.pool_number_list[i]+1) + "/" + str(len(self.pool_number_list)) + "\n" + \
"Abgelegt im Ordner: " + str(self.directory_number_list[i]) + "\n" + \
"Taxonomie: " + str(self.taxonomy_collection_no_dublicates[i]) + "\n" + \
"Anzahl der Fragen: " + str(self.oid_number_list[i]) + " \n" + \
"_____________________________________________________________" + "\n" + \
"\n"
messagebox.showinfo("Fragenpool erstellen", "Fragenpool wurde erstellt!" + "\n\n" +
string_collection)
| [
"re.escape",
"Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_latex",
"pandas.core.reshape.util.cartesian_product",
"xml.etree.ElementTree.parse",
"Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_italic",
"numpy.linspace",
"tkinter.ttk.Combobox",
"Test_Generator_Module.test_generator_modul_datenbanken_anzeigen.MainGUI.__init__",
"Test_Generator_Module.test_generator_modul_datenbanken_erstellen.Import_Export_Database.excel_import_to_db",
"tkinter.messagebox.showinfo",
"Test_Generator_Module.test_generator_modul_ilias_test_struktur.Additional_Funtions.replace_character_in_xml_file",
"Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_3",
"Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.reallocate_text",
"Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_sup",
"Test_Generator_Module.test_generator_modul_ilias_import_test_datei.Import_ILIAS_Datei_in_DB.__init__",
"Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Taxonomie.__init__",
"Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_sub",
"Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_2",
"xml.etree.ElementTree.SubElement",
"Test_Generator_Module.test_generator_modul_ilias_test_struktur.Additional_Funtions.set_taxonomy_for_question",
"sqlite3.connect",
"Test_Generator_Module.test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images",
"Test_Generator_Module.test_generator_modul_ilias_test_struktur.Additional_Funtions.add_picture_to_description_main",
"Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_1",
"os.path.join",
"Test_Generator_Module.test_generator_modul_datenbanken_erstellen.Import_Export_Database.excel_export_to_xlsx",
"xml.etree.ElementTree.Element",
"Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Taxonomie.create_taxonomy_for_pool"
] | [((6566, 6613), 'sqlite3.connect', 'sqlite3.connect', (['self.database_formelfrage_path'], {}), '(self.database_formelfrage_path)\n', (6581, 6613), False, 'import sqlite3\n'), ((30528, 30620), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['self.ff_frame_vector_diagram'], {'value': 'self.ff_vector_diagram_type', 'width': '(20)'}), '(self.ff_frame_vector_diagram, value=self.\n ff_vector_diagram_type, width=20)\n', (30540, 30620), False, 'from tkinter import ttk\n'), ((39550, 39622), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['self.ff_frame'], {'value': 'self.ff_processingtime_hours', 'width': '(2)'}), '(self.ff_frame, value=self.ff_processingtime_hours, width=2)\n', (39562, 39622), False, 'from tkinter import ttk\n'), ((39659, 39733), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['self.ff_frame'], {'value': 'self.ff_processingtime_minutes', 'width': '(2)'}), '(self.ff_frame, value=self.ff_processingtime_minutes, width=2)\n', (39671, 39733), False, 'from tkinter import ttk\n'), ((39770, 39844), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['self.ff_frame'], {'value': 'self.ff_processingtime_seconds', 'width': '(2)'}), '(self.ff_frame, value=self.ff_processingtime_seconds, width=2)\n', (39782, 39844), False, 'from tkinter import ttk\n'), ((52904, 52980), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['self.ff_frame'], {'value': 'self.ff_numbers_of_answers_value', 'width': '(3)'}), '(self.ff_frame, value=self.ff_numbers_of_answers_value, width=3)\n', (52916, 52980), False, 'from tkinter import ttk\n'), ((53529, 53594), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['self.ff_frame'], {'value': 'self.select_var_units', 'width': '(5)'}), '(self.ff_frame, value=self.select_var_units, width=5)\n', (53541, 53594), False, 'from tkinter import ttk\n'), ((53674, 53739), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['self.ff_frame'], {'value': 'self.select_var_units', 'width': '(5)'}), '(self.ff_frame, value=self.select_var_units, width=5)\n', (53686, 53739), False, 'from tkinter import ttk\n'), ((53819, 53884), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['self.ff_frame'], {'value': 'self.select_var_units', 'width': '(5)'}), '(self.ff_frame, value=self.select_var_units, width=5)\n', (53831, 53884), False, 'from tkinter import ttk\n'), ((53964, 54029), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['self.ff_frame'], {'value': 'self.select_var_units', 'width': '(5)'}), '(self.ff_frame, value=self.select_var_units, width=5)\n', (53976, 54029), False, 'from tkinter import ttk\n'), ((54109, 54174), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['self.ff_frame'], {'value': 'self.select_var_units', 'width': '(5)'}), '(self.ff_frame, value=self.select_var_units, width=5)\n', (54121, 54174), False, 'from tkinter import ttk\n'), ((54254, 54319), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['self.ff_frame'], {'value': 'self.select_var_units', 'width': '(5)'}), '(self.ff_frame, value=self.select_var_units, width=5)\n', (54266, 54319), False, 'from tkinter import ttk\n'), ((54399, 54464), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['self.ff_frame'], {'value': 'self.select_var_units', 'width': '(5)'}), '(self.ff_frame, value=self.select_var_units, width=5)\n', (54411, 54464), False, 'from tkinter import ttk\n'), ((68771, 68847), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['self.ff_frame'], {'value': 'self.ff_numbers_of_results_value', 'width': '(3)'}), '(self.ff_frame, value=self.ff_numbers_of_results_value, width=3)\n', (68783, 68847), False, 'from tkinter import ttk\n'), ((110784, 110831), 'sqlite3.connect', 'sqlite3.connect', 
(['self.database_formelfrage_path'], {}), '(self.database_formelfrage_path)\n', (110799, 110831), False, 'import sqlite3\n'), ((143203, 143250), 'sqlite3.connect', 'sqlite3.connect', (['self.database_formelfrage_path'], {}), '(self.database_formelfrage_path)\n', (143218, 143250), False, 'import sqlite3\n'), ((156957, 157004), 'sqlite3.connect', 'sqlite3.connect', (['self.database_formelfrage_path'], {}), '(self.database_formelfrage_path)\n', (156972, 157004), False, 'import sqlite3\n'), ((175182, 175229), 'sqlite3.connect', 'sqlite3.connect', (['self.database_formelfrage_path'], {}), '(self.database_formelfrage_path)\n', (175197, 175229), False, 'import sqlite3\n'), ((197402, 197438), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_read_qti_template_path'], {}), '(xml_read_qti_template_path)\n', (197410, 197438), True, 'import xml.etree.ElementTree as ET\n'), ((198572, 198619), 'sqlite3.connect', 'sqlite3.connect', (['self.database_formelfrage_path'], {}), '(self.database_formelfrage_path)\n', (198587, 198619), False, 'import sqlite3\n'), ((233611, 233658), 'sqlite3.connect', 'sqlite3.connect', (['self.database_formelfrage_path'], {}), '(self.database_formelfrage_path)\n', (233626, 233658), False, 'import sqlite3\n'), ((249981, 250031), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['xml_qtimetadata', '"""qtimetadatafield"""'], {}), "(xml_qtimetadata, 'qtimetadatafield')\n", (249994, 250031), True, 'import xml.etree.ElementTree as ET\n'), ((250054, 250099), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldlabel"""'], {}), "(qtimetadatafield, 'fieldlabel')\n", (250067, 250099), True, 'import xml.etree.ElementTree as ET\n'), ((250161, 250206), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldentry"""'], {}), "(qtimetadatafield, 'fieldentry')\n", (250174, 250206), True, 'import xml.etree.ElementTree as ET\n'), ((254477, 254527), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['xml_qtimetadata', '"""qtimetadatafield"""'], {}), "(xml_qtimetadata, 'qtimetadatafield')\n", (254490, 254527), True, 'import xml.etree.ElementTree as ET\n'), ((254550, 254595), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldlabel"""'], {}), "(qtimetadatafield, 'fieldlabel')\n", (254563, 254595), True, 'import xml.etree.ElementTree as ET\n'), ((254662, 254707), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldentry"""'], {}), "(qtimetadatafield, 'fieldentry')\n", (254675, 254707), True, 'import xml.etree.ElementTree as ET\n'), ((343427, 343533), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Fragenpool erstellen"""', "('Fragenpool wurde erstellt!' + '\\n\\n' + string_collection)"], {}), "('Fragenpool erstellen', 'Fragenpool wurde erstellt!' 
+\n '\\n\\n' + string_collection)\n", (343446, 343533), False, 'from tkinter import messagebox\n'), ((4014, 4071), 'os.path.join', 'os.path.join', (['self.project_root_path', '"""ILIAS-Formelfrage"""'], {}), "(self.project_root_path, 'ILIAS-Formelfrage')\n", (4026, 4071), False, 'import os\n'), ((4141, 4206), 'os.path.join', 'os.path.join', (['self.formelfrage_files_path', '"""ff_ilias_pool_abgabe"""'], {}), "(self.formelfrage_files_path, 'ff_ilias_pool_abgabe')\n", (4153, 4206), False, 'import os\n'), ((4303, 4392), 'os.path.join', 'os.path.join', (['self.project_root_path', '"""Test_Generator_Datenbanken"""', 'self.ff_database'], {}), "(self.project_root_path, 'Test_Generator_Datenbanken', self.\n ff_database)\n", (4315, 4392), False, 'import os\n'), ((4503, 4620), 'os.path.join', 'os.path.join', (['self.formelfrage_files_path', '"""ff_test_qti_und_tst_dateien_vorlage"""', '"""ilias_test_vorlage__qti__.xml"""'], {}), "(self.formelfrage_files_path,\n 'ff_test_qti_und_tst_dateien_vorlage', 'ilias_test_vorlage__qti__.xml')\n", (4515, 4620), False, 'import os\n'), ((4691, 4808), 'os.path.join', 'os.path.join', (['self.formelfrage_files_path', '"""ff_test_qti_und_tst_dateien_vorlage"""', '"""ilias_test_vorlage__tst__.xml"""'], {}), "(self.formelfrage_files_path,\n 'ff_test_qti_und_tst_dateien_vorlage', 'ilias_test_vorlage__tst__.xml')\n", (4703, 4808), False, 'import os\n'), ((4945, 5078), 'os.path.join', 'os.path.join', (['self.formelfrage_files_path', '"""ff_ilias_test_abgabe"""', '"""1604407426__0__tst_2040314"""', '"""1604407426__0__qti_2040314.xml"""'], {}), "(self.formelfrage_files_path, 'ff_ilias_test_abgabe',\n '1604407426__0__tst_2040314', '1604407426__0__qti_2040314.xml')\n", (4957, 5078), False, 'import os\n'), ((5147, 5280), 'os.path.join', 'os.path.join', (['self.formelfrage_files_path', '"""ff_ilias_test_abgabe"""', '"""1604407426__0__tst_2040314"""', '"""1604407426__0__tst_2040314.xml"""'], {}), "(self.formelfrage_files_path, 'ff_ilias_test_abgabe',\n '1604407426__0__tst_2040314', '1604407426__0__tst_2040314.xml')\n", (5159, 5280), False, 'import os\n'), ((5342, 5452), 'os.path.join', 'os.path.join', (['self.formelfrage_files_path', '"""ff_ilias_test_abgabe"""', '"""1604407426__0__tst_2040314"""', '"""objects"""'], {}), "(self.formelfrage_files_path, 'ff_ilias_test_abgabe',\n '1604407426__0__tst_2040314', 'objects')\n", (5354, 5452), False, 'import os\n'), ((5566, 5683), 'os.path.join', 'os.path.join', (['self.formelfrage_files_path', '"""ff_pool_qti_und_qpl_dateien_vorlage"""', '"""ilias_pool_vorlage__qti__.xml"""'], {}), "(self.formelfrage_files_path,\n 'ff_pool_qti_und_qpl_dateien_vorlage', 'ilias_pool_vorlage__qti__.xml')\n", (5578, 5683), False, 'import os\n'), ((5754, 5871), 'os.path.join', 'os.path.join', (['self.formelfrage_files_path', '"""ff_pool_qti_und_qpl_dateien_vorlage"""', '"""ilias_pool_vorlage__qpl__.xml"""'], {}), "(self.formelfrage_files_path,\n 'ff_pool_qti_und_qpl_dateien_vorlage', 'ilias_pool_vorlage__qpl__.xml')\n", (5766, 5871), False, 'import os\n'), ((6163, 6228), 'os.path.join', 'os.path.join', (['self.formelfrage_files_path', '"""ff_ilias_pool_abgabe"""'], {}), "(self.formelfrage_files_path, 'ff_ilias_pool_abgabe')\n", (6175, 6228), False, 'import os\n'), ((74449, 74496), 'sqlite3.connect', 'sqlite3.connect', (['self.database_formelfrage_path'], {}), '(self.database_formelfrage_path)\n', (74464, 74496), False, 'import sqlite3\n'), ((136938, 136985), 'sqlite3.connect', 'sqlite3.connect', (['self.database_formelfrage_path'], {}), 
'(self.database_formelfrage_path)\n', (136953, 136985), False, 'import sqlite3\n'), ((174827, 174963), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.reallocate_text', 'test_generator_modul_taxonomie_und_textformatierung.Textformatierung.reallocate_text', (['self', 'self.ff_question_description_main_entry'], {}), '(\n self, self.ff_question_description_main_entry)\n', (174911, 174963), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((198903, 198950), 'sqlite3.connect', 'sqlite3.connect', (['self.database_formelfrage_path'], {}), '(self.database_formelfrage_path)\n', (198918, 198950), False, 'import sqlite3\n'), ((249032, 249055), 'xml.etree.ElementTree.parse', 'ET.parse', (['self.qpl_file'], {}), '(self.qpl_file)\n', (249040, 249055), True, 'import xml.etree.ElementTree as ET\n'), ((333882, 333929), 'sqlite3.connect', 'sqlite3.connect', (['self.database_formelfrage_path'], {}), '(self.database_formelfrage_path)\n', (333897, 333929), False, 'import sqlite3\n'), ((340645, 340778), 'Test_Generator_Module.test_generator_modul_ilias_test_struktur.Additional_Funtions.replace_character_in_xml_file', 'test_generator_modul_ilias_test_struktur.Additional_Funtions.replace_character_in_xml_file', (['self', 'self.pool_qti_file_path_output'], {}), '(\n self, self.pool_qti_file_path_output)\n', (340735, 340778), False, 'from Test_Generator_Module import test_generator_modul_ilias_test_struktur\n'), ((73710, 73757), 'sqlite3.connect', 'sqlite3.connect', (['self.database_formelfrage_path'], {}), '(self.database_formelfrage_path)\n', (73725, 73757), False, 'import sqlite3\n'), ((234144, 234424), 'Test_Generator_Module.test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images', 'test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images', (['self', 'self.ff_description_img_name_1', 'self.ff_description_img_data_1', 'id_nr', 'self.ff_question_type_test_or_pool', 'self.formelfrage_test_img_file_path', 'self.formelfrage_pool_img_file_path'], {}), '(\n self, self.ff_description_img_name_1, self.ff_description_img_data_1,\n id_nr, self.ff_question_type_test_or_pool, self.\n formelfrage_test_img_file_path, self.formelfrage_pool_img_file_path)\n', (234223, 234424), False, 'from Test_Generator_Module import test_generator_modul_ilias_test_struktur\n'), ((234428, 234708), 'Test_Generator_Module.test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images', 'test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images', (['self', 'self.ff_description_img_name_2', 'self.ff_description_img_data_2', 'id_nr', 'self.ff_question_type_test_or_pool', 'self.formelfrage_test_img_file_path', 'self.formelfrage_pool_img_file_path'], {}), '(\n self, self.ff_description_img_name_2, self.ff_description_img_data_2,\n id_nr, self.ff_question_type_test_or_pool, self.\n formelfrage_test_img_file_path, self.formelfrage_pool_img_file_path)\n', (234507, 234708), False, 'from Test_Generator_Module import test_generator_modul_ilias_test_struktur\n'), ((234712, 234992), 'Test_Generator_Module.test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images', 'test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images', (['self', 'self.ff_description_img_name_3', 'self.ff_description_img_data_3', 'id_nr', 'self.ff_question_type_test_or_pool', 'self.formelfrage_test_img_file_path', 'self.formelfrage_pool_img_file_path'], {}), '(\n 
self, self.ff_description_img_name_3, self.ff_description_img_data_3,\n id_nr, self.ff_question_type_test_or_pool, self.\n formelfrage_test_img_file_path, self.formelfrage_pool_img_file_path)\n', (234791, 234992), False, 'from Test_Generator_Module import test_generator_modul_ilias_test_struktur\n'), ((236920, 236953), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['item', '"""qticomment"""'], {}), "(item, 'qticomment')\n", (236933, 236953), True, 'import xml.etree.ElementTree as ET\n'), ((236982, 237013), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['item', '"""duration"""'], {}), "(item, 'duration')\n", (236995, 237013), True, 'import xml.etree.ElementTree as ET\n'), ((237046, 237081), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['item', '"""itemmetadata"""'], {}), "(item, 'itemmetadata')\n", (237059, 237081), True, 'import xml.etree.ElementTree as ET\n'), ((237114, 237149), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['item', '"""presentation"""'], {}), "(item, 'presentation')\n", (237127, 237149), True, 'import xml.etree.ElementTree as ET\n'), ((237176, 237211), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['presentation', '"""flow"""'], {}), "(presentation, 'flow')\n", (237189, 237211), True, 'import xml.etree.ElementTree as ET\n'), ((237261, 237292), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['flow', '"""material"""'], {}), "(flow, 'material')\n", (237274, 237292), True, 'import xml.etree.ElementTree as ET\n'), ((237341, 237396), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['question_description_material', '"""mattext"""'], {}), "(question_description_material, 'mattext')\n", (237354, 237396), True, 'import xml.etree.ElementTree as ET\n'), ((237428, 237470), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['itemmetadata', '"""qtimetadata"""'], {}), "(itemmetadata, 'qtimetadata')\n", (237441, 237470), True, 'import xml.etree.ElementTree as ET\n'), ((238290, 238336), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadata', '"""qtimetadatafield"""'], {}), "(qtimetadata, 'qtimetadatafield')\n", (238303, 238336), True, 'import xml.etree.ElementTree as ET\n'), ((238367, 238412), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldlabel"""'], {}), "(qtimetadatafield, 'fieldlabel')\n", (238380, 238412), True, 'import xml.etree.ElementTree as ET\n'), ((238494, 238539), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldentry"""'], {}), "(qtimetadatafield, 'fieldentry')\n", (238507, 238539), True, 'import xml.etree.ElementTree as ET\n'), ((238734, 238780), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadata', '"""qtimetadatafield"""'], {}), "(qtimetadata, 'qtimetadatafield')\n", (238747, 238780), True, 'import xml.etree.ElementTree as ET\n'), ((238811, 238856), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldlabel"""'], {}), "(qtimetadatafield, 'fieldlabel')\n", (238824, 238856), True, 'import xml.etree.ElementTree as ET\n'), ((238937, 238982), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldentry"""'], {}), "(qtimetadatafield, 'fieldentry')\n", (238950, 238982), True, 'import xml.etree.ElementTree as ET\n'), ((239172, 239218), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadata', '"""qtimetadatafield"""'], {}), "(qtimetadata, 'qtimetadatafield')\n", (239185, 239218), True, 'import xml.etree.ElementTree as ET\n'), ((239249, 239294), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldlabel"""'], {}), "(qtimetadatafield, 'fieldlabel')\n", (239262, 239294), True, 'import xml.etree.ElementTree as ET\n'), ((239369, 239414), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldentry"""'], {}), "(qtimetadatafield, 'fieldentry')\n", (239382, 239414), True, 'import xml.etree.ElementTree as ET\n'), ((239607, 239653), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadata', '"""qtimetadatafield"""'], {}), "(qtimetadata, 'qtimetadatafield')\n", (239620, 239653), True, 'import xml.etree.ElementTree as ET\n'), ((239684, 239729), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldlabel"""'], {}), "(qtimetadatafield, 'fieldlabel')\n", (239697, 239729), True, 'import xml.etree.ElementTree as ET\n'), ((239804, 239849), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldentry"""'], {}), "(qtimetadatafield, 'fieldentry')\n", (239817, 239849), True, 'import xml.etree.ElementTree as ET\n'), ((240502, 240918), 'Test_Generator_Module.test_generator_modul_ilias_test_struktur.Additional_Funtions.add_picture_to_description_main', 'test_generator_modul_ilias_test_struktur.Additional_Funtions.add_picture_to_description_main', (['self', 'self.ff_description_img_name_1', 'self.ff_description_img_data_1', 'self.ff_description_img_name_2', 'self.ff_description_img_data_2', 'self.ff_description_img_name_3', 'self.ff_description_img_data_3', 'self.ff_question_description_main', 'question_description_mattext', 'question_description_material', 'id_nr'], {}), '(\n self, self.ff_description_img_name_1, self.ff_description_img_data_1,\n self.ff_description_img_name_2, self.ff_description_img_data_2, self.\n ff_description_img_name_3, self.ff_description_img_data_3, self.\n ff_question_description_main, question_description_mattext,\n question_description_material, id_nr)\n', (240594, 240918), False, 'from Test_Generator_Module import test_generator_modul_ilias_test_struktur\n'), ((246875, 246921), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadata', '"""qtimetadatafield"""'], {}), "(qtimetadata, 'qtimetadatafield')\n", (246888, 246921), True, 'import xml.etree.ElementTree as ET\n'), ((246952, 246997), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldlabel"""'], {}), "(qtimetadatafield, 'fieldlabel')\n", (246965, 246997), True, 'import xml.etree.ElementTree as ET\n'), ((247091, 247136), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldentry"""'], {}), "(qtimetadatafield, 'fieldentry')\n", (247104, 247136), True, 'import xml.etree.ElementTree as ET\n'), ((247320, 247366), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadata', '"""qtimetadatafield"""'], {}), "(qtimetadata, 'qtimetadatafield')\n", (247333, 247366), True, 'import xml.etree.ElementTree as ET\n'), ((247397, 247442), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldlabel"""'], {}), "(qtimetadatafield, 'fieldlabel')\n", (247410, 247442), True, 'import xml.etree.ElementTree as ET\n'), ((247521, 247566), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldentry"""'], {}), "(qtimetadatafield, 'fieldentry')\n", (247534, 247566), True, 'import xml.etree.ElementTree as ET\n'), ((248881, 249007), 'os.path.join', 'os.path.join', (['self.formelfrage_files_path', '"""ff_ilias_pool_abgabe"""', 'self.ilias_id_pool_qpl_dir', 'self.ilias_id_pool_qti_xml'], {}), "(self.formelfrage_files_path, 'ff_ilias_pool_abgabe', self.\n ilias_id_pool_qpl_dir, self.ilias_id_pool_qti_xml)\n", (248893, 249007), False, 'import os\n'), ((341016, 341369), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Taxonomie.create_taxonomy_for_pool', 'test_generator_modul_taxonomie_und_textformatierung.Taxonomie.create_taxonomy_for_pool', (['self', 'self.ff_pool_entry', 'self.ff_var_create_question_pool_all', 'self.database_formelfrage_path', '"""formelfrage_table"""', 'self.ff_entry_to_index_dict', 'self.taxonomy_file_question_pool', 'self.pool_qti_file_path_output', 'pool_number', 'self.number_of_pools'], {}), "(\n self, self.ff_pool_entry, self.ff_var_create_question_pool_all, self.\n database_formelfrage_path, 'formelfrage_table', self.\n ff_entry_to_index_dict, self.taxonomy_file_question_pool, self.\n pool_qti_file_path_output, pool_number, self.number_of_pools)\n", (341102, 341369), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((15657, 15733), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Taxonomie.__init__', 'test_generator_modul_taxonomie_und_textformatierung.Taxonomie.__init__', (['self'], {}), '(self)\n', (15727, 15733), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((23302, 23427), 'Test_Generator_Module.test_generator_modul_datenbanken_anzeigen.MainGUI.__init__', 'test_generator_modul_datenbanken_anzeigen.MainGUI.__init__', (['self', 'self.database_formelfrage_path', 'self.ff_database_table'], {}), '(self, self.\n database_formelfrage_path, self.ff_database_table)\n', (23360, 23427), False, 'from Test_Generator_Module import test_generator_modul_datenbanken_anzeigen\n'), ((26483, 26638), 'Test_Generator_Module.test_generator_modul_datenbanken_erstellen.Import_Export_Database.excel_import_to_db', 'test_generator_modul_datenbanken_erstellen.Import_Export_Database.excel_import_to_db', (['self', 'self.ff_question_type_name', 'self.ff_db_entry_to_index_dict'], {}), '(\n self, self.ff_question_type_name, self.ff_db_entry_to_index_dict)\n', (26567, 26638), False, 'from Test_Generator_Module import test_generator_modul_datenbanken_erstellen\n'), ((26909, 27202), 'Test_Generator_Module.test_generator_modul_datenbanken_erstellen.Import_Export_Database.excel_export_to_xlsx', 'test_generator_modul_datenbanken_erstellen.Import_Export_Database.excel_export_to_xlsx', (['self', 'self.project_root_path', 'self.ff_db_entry_to_index_dict', 'self.database_formelfrage_path', 'self.ff_database', 'self.ff_database_table', 'self.ff_xlsx_workbook_name', 'self.ff_xlsx_worksheet_name'], {}), '(\n self, self.project_root_path, self.ff_db_entry_to_index_dict, self.\n database_formelfrage_path, self.ff_database, self.ff_database_table,\n self.ff_xlsx_workbook_name, self.ff_xlsx_worksheet_name)\n', (26995, 27202), False, 'from Test_Generator_Module import test_generator_modul_datenbanken_erstellen\n'), ((27464, 27577), 'Test_Generator_Module.test_generator_modul_ilias_import_test_datei.Import_ILIAS_Datei_in_DB.__init__', 'test_generator_modul_ilias_import_test_datei.Import_ILIAS_Datei_in_DB.__init__', (['self', 'self.project_root_path'], {}), '(\n self, self.project_root_path)\n', (27542, 27577), False, 'from Test_Generator_Module import test_generator_modul_ilias_import_test_datei\n'), ((27920, 28051), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_latex', 'test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_latex', (['self', 'self.ff_question_description_main_entry'], {}), '(\n self, self.ff_question_description_main_entry)\n', (27999, 28051), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((28258, 28387), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_sub', 'test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_sub', (['self', 'self.ff_question_description_main_entry'], {}), '(\n self, self.ff_question_description_main_entry)\n', (28335, 28387), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((28607, 28736), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_sup', 'test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_sup', (['self', 'self.ff_question_description_main_entry'], {}), '(\n self, self.ff_question_description_main_entry)\n', (28684, 28736), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((28938, 29070), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_italic', 'test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_italic', (['self', 'self.ff_question_description_main_entry'], {}), '(\n self, self.ff_question_description_main_entry)\n', (29018, 29070), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((29281, 29428), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_1', 'test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_1', (['self', 'self.ff_question_description_main_entry'], {}), '(\n self, self.ff_question_description_main_entry)\n', (29376, 29428), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((29663, 29810), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_2', 'test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_2', (['self', 'self.ff_question_description_main_entry'], {}), '(\n self, self.ff_question_description_main_entry)\n', (29758, 29810), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((30032, 30179), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_3', 'test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_3', (['self', 'self.ff_question_description_main_entry'], {}), '(\n self, self.ff_question_description_main_entry)\n', (30127, 30179), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((108304, 108459), 'numpy.linspace', 'np.linspace', (['self.lower_list[self.list_index_dict[self.set_nr_of_var_index[p]]]', 'self.upper_list[self.list_index_dict[self.set_nr_of_var_index[p]]]', 'N'], {}), '(self.lower_list[self.list_index_dict[self.set_nr_of_var_index[p\n ]]], self.upper_list[self.list_index_dict[self.set_nr_of_var_index[p]]], N)\n', (108315, 108459), True, 'import numpy as np\n'), ((108491, 108516), 'pandas.core.reshape.util.cartesian_product', 'cartesian_product', (['values'], {}), '(values)\n', (108508, 108516), False, 'from pandas.core.reshape.util import cartesian_product\n'), ((135235, 135390), 'numpy.linspace', 'np.linspace', (['self.lower_list[self.list_index_dict[self.set_nr_of_var_index[p]]]', 'self.upper_list[self.list_index_dict[self.set_nr_of_var_index[p]]]', 'N'], {}), '(self.lower_list[self.list_index_dict[self.set_nr_of_var_index[p\n ]]], self.upper_list[self.list_index_dict[self.set_nr_of_var_index[p]]], N)\n', (135246, 135390), True, 'import numpy as np\n'), ((135422, 135447), 'pandas.core.reshape.util.cartesian_product', 'cartesian_product', (['values'], {}), '(values)\n', (135439, 135447), False, 'from pandas.core.reshape.util import cartesian_product\n'), ((143671, 143739), 'os.path.join', 'os.path.join', (['self.project_root_path', 'self.ff_description_img_path_1'], {}), '(self.project_root_path, self.ff_description_img_path_1)\n', (143683, 143739), False, 'import os\n'), ((175914, 175982), 'os.path.join', 'os.path.join', (['self.project_root_path', 'self.ff_description_img_path_1'], {}), '(self.project_root_path, self.ff_description_img_path_1)\n', (175926, 175982), False, 'import os\n'), ((235243, 235272), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""questestinterop"""'], {}), "('questestinterop')\n", (235253, 235272), True, 'import xml.etree.ElementTree as ET\n'), ((235307, 235351), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['questestinterop', '"""assessment"""'], {}), "(questestinterop, 'assessment')\n", (235320, 235351), True, 'import xml.etree.ElementTree as ET\n'), ((235383, 235419), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['assessment', '"""section"""'], {}), "(assessment, 'section')\n", (235396, 235419), True, 'import xml.etree.ElementTree as ET\n'), ((235448, 235478), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['section', '"""item"""'], {}), "(section, 'item')\n", (235461, 235478), True, 'import xml.etree.ElementTree as ET\n'), ((235689, 235718), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""questestinterop"""'], {}), "('questestinterop')\n", (235699, 235718), True, 'import xml.etree.ElementTree as ET\n'), ((235747, 235785), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['questestinterop', '"""item"""'], {}), "(questestinterop, 'item')\n", (235760, 235785), True, 'import xml.etree.ElementTree as ET\n'), ((235869, 236102), 'Test_Generator_Module.test_generator_modul_ilias_test_struktur.Additional_Funtions.set_taxonomy_for_question', 'test_generator_modul_ilias_test_struktur.Additional_Funtions.set_taxonomy_for_question', (['self', 'id_nr', 'self.number_of_entrys', 'item', 'self.formelfrage_pool_qpl_file_path_template', 'self.formelfrage_pool_qpl_file_path_output'], {}), '(\n self, id_nr, self.number_of_entrys, item, self.\n formelfrage_pool_qpl_file_path_template, self.\n formelfrage_pool_qpl_file_path_output)\n', (235955, 236102), False, 'from Test_Generator_Module import test_generator_modul_ilias_test_struktur\n'), ((98484, 98496), 're.escape', 're.escape', (['s'], {}), '(s)\n', (98493, 98496), False, 'import re\n'), ((98876, 98888), 're.escape', 're.escape', (['s'], {}), '(s)\n', (98885, 98888), False, 'import re\n'), ((141344, 141356), 're.escape', 're.escape', (['s'], {}), '(s)\n', (141353, 141356), False, 'import re\n'), ((141736, 141748), 're.escape', 're.escape', (['s'], {}), '(s)\n', (141745, 141748), False, 'import re\n'), ((89812, 89859), 'sqlite3.connect', 'sqlite3.connect', (['self.database_formelfrage_path'], {}), '(self.database_formelfrage_path)\n', (89827, 89859), False, 'import sqlite3\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: 11360
# datetime: 2021/4/14 10:25
import torch
import torch.nn as nn
from src.model import oneDimenNet
from src.data import data_test_loader
model_path = ''
save_info = torch.load(model_path)
model = oneDimenNet()
criterion = nn.BCELoss()
model.load_state_dict(save_info["model"])
model.eval()
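# put the network in evaluation mode (disables training-only behavior such as dropout/batch-norm updates)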
test_loss = 0
correct = 0
total = 0
with torch.no_grad():  # disable gradient tracking (no computation graph is built)
for batch_idx, (inputs, targets) in enumerate(data_test_loader):
outputs = model(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
predicted = outputs > 0.5 # binary classification
total += targets.size(0)
        correct += (predicted == targets).sum().item()  # count correct predictions
print("acc = {}\n".format(correct / total * 100))
print("loss = {}\n".format(test_loss))
| [
"torch.no_grad",
"torch.load",
"torch.nn.BCELoss",
"src.model.oneDimenNet"
] | [((237, 259), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (247, 259), False, 'import torch\n'), ((269, 282), 'src.model.oneDimenNet', 'oneDimenNet', ([], {}), '()\n', (280, 282), False, 'from src.model import oneDimenNet\n'), ((296, 308), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (306, 308), True, 'import torch.nn as nn\n'), ((413, 428), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (426, 428), False, 'import torch\n')] |
import numpy as np
import matplotlib.pyplot as plt
with open("path.txt") as path_fp:
path = path_fp.readlines()
fig, ax = plt.subplots()
for point in path:
x, y = point.split()
ax.plot(float(x), float(y), marker = 'o', color='b')
plt.gca().set_aspect('equal', adjustable='box')
ax.set_xlim(0, 66)
ax.set_ylim(0, 60)
ax.grid('on')
plt.show()
| [
"matplotlib.pyplot.gca",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((125, 139), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (137, 139), True, 'import matplotlib.pyplot as plt\n'), ((337, 347), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (345, 347), True, 'import matplotlib.pyplot as plt\n'), ((237, 246), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (244, 246), True, 'import matplotlib.pyplot as plt\n')] |
"""
This module implements tasks
"""
import time
import datetime
import logging
import threading
from sentinelhub import CRS
from .constants import MAX_TASKS
from .schemas import TaskSchema
from .utils import get_uuid
LOWER_BOUND = 0
UPPER_BOUND = 2 ** 50
LOGGER = logging.getLogger(__name__)
class Task:
""" Container with task parameters
"""
# TODO: figure what to do with **props in attrs package
# id = attr.ib(validator=instance_of(int))
# bbox = attr.ib(validator=instance_of(BBox))
# time = attr.ib(validator=instance_of(datetime.datetime))
# window_size = attr.ib(validator=instance_of(list))
# data_list = attr.ib(validator=instance_of(list))
def __init__(self, bbox, acq_time, window_shape, task_id=None, data_list=None, **props):
self.task_id = get_uuid() if task_id is None else task_id
self.bbox = bbox
self.acq_time = acq_time
self.window_shape = window_shape
self.data_list = [] if data_list is None else data_list
self.props = props
def get_app_json(self):
bbox_coords = list(self.bbox)
crs = self.bbox.get_crs()
# TODO: use TaskSchema(strict=True).dump instead of this
payload = {
'id': self.task_id,
'bbox': [bbox_coords[1], bbox_coords[0],
bbox_coords[3], bbox_coords[2]] if crs is CRS.WGS84 else bbox_coords,
'crs': int(crs.value),
'window_width': self.window_shape[0],
'window_height': self.window_shape[1],
'datetime': self.acq_time,
'data': self.data_list
}
if 'vector_data' in self.props and self.props['vector_data'] is not None:
payload['vectorData'] = self.props['vector_data']
payload = TaskSchema(strict=True).dump(payload)[0]
payload['window'] = { # TODO: remove this once they change it on frontend
'width': self.window_shape[0],
'height': self.window_shape[1]
}
return payload
class TaskThreading(threading.Thread):
""" Class to handle creating tasks in the back-ground and adding them to Geopedia """
def __init__(self, campaign, store, *args, interval=1, **kwargs):
threading.Thread.__init__(self, target=self.run, *args, **kwargs)
# sleep time interval between geopedia requests
self.campaign = campaign
self.store = store
self.interval = interval
def run(self):
for _ in range(MAX_TASKS):
try:
current_task = self.store.add_task(self.campaign)
LOGGER.info("Task %s added to geopedia", current_task.task_id)
time.sleep(self.interval)
except (RuntimeError, ValueError) as exception:
LOGGER.debug("Error creating tasks in the background: %s", str(exception))
| [
"logging.getLogger",
"threading.Thread.__init__",
"time.sleep"
] | [((270, 297), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (287, 297), False, 'import logging\n'), ((2242, 2307), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self', '*args'], {'target': 'self.run'}), '(self, *args, target=self.run, **kwargs)\n', (2267, 2307), False, 'import threading\n'), ((2690, 2715), 'time.sleep', 'time.sleep', (['self.interval'], {}), '(self.interval)\n', (2700, 2715), False, 'import time\n')] |
from sys import stdin
# import math
# import heapq
# from collections import deque,Counter,defaultdict
# from itertools import permutations,combinations,combinations_with_replacement
# from operator import itemgetter
# from functools import reduce
def ii(): return int(stdin.readline())
def mi(): return map(int, stdin.readline().split())
def li(): return list(mi())
def si(): return stdin.readline()
t = 1
t = ii()
for _ in range(t):
n,s = mi()
sum1 = n*(n+1)//2
if 1<=sum1 - s<=n:
print(sum1-s)
else:
print(-1)
| [
"sys.stdin.readline"
] | [((386, 402), 'sys.stdin.readline', 'stdin.readline', ([], {}), '()\n', (400, 402), False, 'from sys import stdin\n'), ((271, 287), 'sys.stdin.readline', 'stdin.readline', ([], {}), '()\n', (285, 287), False, 'from sys import stdin\n'), ((315, 331), 'sys.stdin.readline', 'stdin.readline', ([], {}), '()\n', (329, 331), False, 'from sys import stdin\n')] |
from setuptools import setup
from categoryeval import __name__, __version__
setup(
name=__name__,
version=__version__,
packages=[__name__],
include_package_data=True,
install_requires=[
'bayesian-optimization==0.6',
'cytoolz==0.10.1',
'scipy==1.4.1',
'scikit-learn==0.21.3',
'matplotlib==3.1.2',
'numpy==1.18.1',
'pyitlib==0.2.2',
],
url='https://github.com/phueb/CategoryEval',
license='',
author='<NAME>',
author_email='<EMAIL>',
description='Evaluate word representations for category knowledge'
) | [
"setuptools.setup"
] | [((78, 524), 'setuptools.setup', 'setup', ([], {'name': '__name__', 'version': '__version__', 'packages': '[__name__]', 'include_package_data': '(True)', 'install_requires': "['bayesian-optimization==0.6', 'cytoolz==0.10.1', 'scipy==1.4.1',\n 'scikit-learn==0.21.3', 'matplotlib==3.1.2', 'numpy==1.18.1',\n 'pyitlib==0.2.2']", 'url': '"""https://github.com/phueb/CategoryEval"""', 'license': '""""""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'description': '"""Evaluate word representations for category knowledge"""'}), "(name=__name__, version=__version__, packages=[__name__],\n include_package_data=True, install_requires=[\n 'bayesian-optimization==0.6', 'cytoolz==0.10.1', 'scipy==1.4.1',\n 'scikit-learn==0.21.3', 'matplotlib==3.1.2', 'numpy==1.18.1',\n 'pyitlib==0.2.2'], url='https://github.com/phueb/CategoryEval', license\n ='', author='<NAME>', author_email='<EMAIL>', description=\n 'Evaluate word representations for category knowledge')\n", (83, 524), False, 'from setuptools import setup\n')] |
import os
import dj_database_url
from .settings import * # noqa: F403
DEBUG = False
SECRET_KEY = os.environ["SECRET_KEY"]
ALLOWED_HOSTS = ["shared-tw.herokuapp.com", "api.shared-tw.icu"]
DATABASES["default"] = dj_database_url.config( # noqa: F405
conn_max_age=600, ssl_require=True
)
AUTHENTICATOR["hash_id_secret"] = os.environ["HASH_ID_SECRET"] # noqa: F405
if "OAUTHLIB_INSECURE_TRANSPORT" in os.environ:
del os.environ["OAUTHLIB_INSECURE_TRANSPORT"]
| [
"dj_database_url.config"
] | [((214, 272), 'dj_database_url.config', 'dj_database_url.config', ([], {'conn_max_age': '(600)', 'ssl_require': '(True)'}), '(conn_max_age=600, ssl_require=True)\n', (236, 272), False, 'import dj_database_url\n')] |
# Copyright (c): <NAME> 2017-2019.
# Author: <NAME>
# Email: <EMAIL>
# Update Date: 2019 - 3 - 21
import torch
import torch.nn as nn
from .Optim.SISR import PerceptualOptimizer
from .Ops.Blocks import EasyConv2d, RB
from ..Util.Utility import upsample
class Espcn(nn.Module):
def __init__(self, channel, scale):
super(Espcn, self).__init__()
conv1 = nn.Conv2d(channel, 64, 5, 1, 2)
conv2 = nn.Conv2d(64, 32, 3, 1, 1)
conv3 = nn.Conv2d(32, channel * scale * scale, 3, 1, 1)
ps = nn.PixelShuffle(scale)
self.body = nn.Sequential(conv1, nn.Tanh(),
conv2, nn.Tanh(),
conv3, nn.Tanh(), ps)
def forward(self, x):
return self.body(x)
class Srcnn(nn.Module):
def __init__(self, channel, filters=(9, 5, 5)):
super(Srcnn, self).__init__()
self.net = nn.Sequential(
EasyConv2d(channel, 64, filters[0], activation='relu'),
EasyConv2d(64, 32, filters[1], activation='relu'),
EasyConv2d(32, channel, filters[2], activation=None))
def forward(self, x):
return self.net(x)
class Vdsr(nn.Module):
def __init__(self, channel, layers=20):
super(Vdsr, self).__init__()
net = [EasyConv2d(channel, 64, 3, activation='relu')]
for i in range(1, layers - 1):
net.append(EasyConv2d(64, 64, 3, activation='relu'))
net.append(EasyConv2d(64, channel, 3))
self.net = nn.Sequential(*net)
def forward(self, x):
return self.net(x) + x
class DnCnn(nn.Module):
def __init__(self, channel, layers, bn):
super(DnCnn, self).__init__()
net = [EasyConv2d(channel, 64, 3, activation='relu', use_bn=bn)]
for i in range(1, layers - 1):
net.append(EasyConv2d(64, 64, 3, activation='relu', use_bn=bn))
net.append(EasyConv2d(64, channel, 3))
self.net = nn.Sequential(*net)
def forward(self, x):
return self.net(x) + x
class Drcn(nn.Module):
def __init__(self, scale, channel, n_recur, filters):
from torch.nn import Parameter
super(Drcn, self).__init__()
self.entry = nn.Sequential(
EasyConv2d(channel, filters, 3, activation='relu'),
EasyConv2d(filters, filters, 3, activation='relu'))
self.exit = nn.Sequential(
EasyConv2d(filters, filters, 3, activation='relu'),
EasyConv2d(filters, channel, 3))
self.conv = EasyConv2d(filters, filters, 3, activation='relu')
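    # One learnable weight for the bicubic-upsampled input plus one weight per
    # recursion output; forward() combines them all into a weighted sum.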
self.output_weights = Parameter(torch.empty(n_recur + 1))
torch.nn.init.uniform_(self.output_weights, 0, 1)
self.n_recur = n_recur
self.scale = scale
def forward(self, x):
bic = upsample(x, self.scale)
y = [self.entry(bic)]
for i in range(self.n_recur):
y.append(self.conv(y[-1]))
sr = [self.exit(i) for i in y[1:]]
final = bic * self.output_weights[0]
for i in range(len(sr)):
final = final + self.output_weights[i + 1] * sr[i]
return final
class Drrn(nn.Module):
def __init__(self, channel, n_ru, n_rb, filters):
super(Drrn, self).__init__()
self.entry0 = EasyConv2d(channel, filters, 3, activation='relu')
for i in range(1, n_rb):
setattr(self, f'entry{i}',
EasyConv2d(filters, filters, 3, activation='relu'))
self.n_rb = n_rb
self.rb = RB(filters, kernel_size=3, activation='relu')
self.n_ru = n_ru
self.exit = EasyConv2d(filters, channel, 3)
def forward(self, x):
for i in range(self.n_rb):
entry = getattr(self, f'entry{i}')
y = entry(x)
for j in range(self.n_ru):
y = self.rb(y)
x = y
return self.exit(x)
class ESPCN(PerceptualOptimizer):
def __init__(self, scale, channel, **kwargs):
self.espcn = Espcn(channel, scale)
super(ESPCN, self).__init__(scale, channel, **kwargs)
def fn(self, tensor):
return self.espcn(tensor * 2 - 1) / 2 + 0.5
class SRCNN(PerceptualOptimizer):
def __init__(self, scale, channel, **kwargs):
filters = kwargs.get('filters', (9, 5, 5))
self.srcnn = Srcnn(channel, filters)
super(SRCNN, self).__init__(scale, channel, **kwargs)
def fn(self, tensor):
x = upsample(tensor, self.scale)
return self.srcnn(x)
class VDSR(PerceptualOptimizer):
def __init__(self, scale, channel, **kwargs):
layers = kwargs.get('layers', 20)
self.vdsr = Vdsr(channel, layers)
super(VDSR, self).__init__(scale, channel, **kwargs)
def fn(self, tensor):
x = upsample(tensor, self.scale)
return self.vdsr(x)
class DNCNN(PerceptualOptimizer):
def __init__(self, channel, noise, **kwargs):
layers = kwargs.get('layers', 15)
bn = kwargs.get('bn', True)
self.dncnn = DnCnn(channel, layers, bn)
self.noise = noise / 255
self.norm = torch.distributions.normal.Normal(0, self.noise)
super(DNCNN, self).__init__(1, channel, **kwargs)
def fn(self, tensor):
if self.noise > 0:
device = tensor.device
noise = self.norm.sample(tensor.shape)
tensor += noise.to(device)
return self.dncnn(tensor)
class DRCN(PerceptualOptimizer):
def __init__(self, scale, channel, n_recur, **kwargs):
self.drcn = Drcn(scale, channel, n_recur, 128)
super(DRCN, self).__init__(scale, channel, **kwargs)
def fn(self, tensor):
return self.drcn(tensor)
class DRRN(PerceptualOptimizer):
def __init__(self, scale, channel, n_rb, n_ru, **kwargs):
self.drrn = Drrn(channel, n_ru, n_rb, 128)
super(DRRN, self).__init__(scale, channel, **kwargs)
def fn(self, tensor):
x = upsample(tensor, self.scale)
return self.drrn(x)
| [
"torch.nn.Tanh",
"torch.nn.PixelShuffle",
"torch.nn.Sequential",
"torch.distributions.normal.Normal",
"torch.nn.Conv2d",
"torch.empty",
"torch.nn.init.uniform_"
] | [((368, 399), 'torch.nn.Conv2d', 'nn.Conv2d', (['channel', '(64)', '(5)', '(1)', '(2)'], {}), '(channel, 64, 5, 1, 2)\n', (377, 399), True, 'import torch.nn as nn\n'), ((412, 438), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(32)', '(3)', '(1)', '(1)'], {}), '(64, 32, 3, 1, 1)\n', (421, 438), True, 'import torch.nn as nn\n'), ((451, 498), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(channel * scale * scale)', '(3)', '(1)', '(1)'], {}), '(32, channel * scale * scale, 3, 1, 1)\n', (460, 498), True, 'import torch.nn as nn\n'), ((508, 530), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['scale'], {}), '(scale)\n', (523, 530), True, 'import torch.nn as nn\n'), ((1411, 1430), 'torch.nn.Sequential', 'nn.Sequential', (['*net'], {}), '(*net)\n', (1424, 1430), True, 'import torch.nn as nn\n'), ((1818, 1837), 'torch.nn.Sequential', 'nn.Sequential', (['*net'], {}), '(*net)\n', (1831, 1837), True, 'import torch.nn as nn\n'), ((2457, 2506), 'torch.nn.init.uniform_', 'torch.nn.init.uniform_', (['self.output_weights', '(0)', '(1)'], {}), '(self.output_weights, 0, 1)\n', (2479, 2506), False, 'import torch\n'), ((4673, 4721), 'torch.distributions.normal.Normal', 'torch.distributions.normal.Normal', (['(0)', 'self.noise'], {}), '(0, self.noise)\n', (4706, 4721), False, 'import torch\n'), ((568, 577), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (575, 577), True, 'import torch.nn as nn\n'), ((616, 625), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (623, 625), True, 'import torch.nn as nn\n'), ((664, 673), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (671, 673), True, 'import torch.nn as nn\n'), ((2427, 2451), 'torch.empty', 'torch.empty', (['(n_recur + 1)'], {}), '(n_recur + 1)\n', (2438, 2451), False, 'import torch\n')] |
# Generated by Django 1.11.22 on 2019-07-09 15:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('courseware', '0009_auto_20190703_1955'),
]
operations = [
migrations.AlterField(
model_name='studentmodule',
name='student',
field=models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"django.db.models.ForeignKey"
] | [((416, 533), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'db_constraint': '(False)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(db_constraint=False, on_delete=django.db.models.deletion.\n CASCADE, to=settings.AUTH_USER_MODEL)\n', (433, 533), False, 'from django.db import migrations, models\n')] |
from argparse import ArgumentParser, Namespace
from .core import build_output, infer_params
from .utils import find_target, load_config
from pathlib import Path
from typing import Dict
def build_parser():
parser = ArgumentParser()
parser.add_argument('-t', '--target', type=str, default='common', help='tfvars search target')
parser.add_argument('-k', '--key', type=str, help='variable to read')
parser.add_argument('-c', '--component', choices=['bucket', 'object', 'table', 'env'])
parser.add_argument('-x', '--extension', type=str, default='tfvars')
parser.add_argument('--bucket-name', type=str, default='terraformstate')
parser.add_argument('--table-name', type=str, default='terraformlock')
parser.add_argument('--prefix', type=str, default='{app_name}-')
parser.add_argument('--suffix', type=str, default='-{app_env}-{company}')
parser.add_argument('--state-name', type=str, default='terraform.tfstate')
parser.add_argument('--env-var', type=str, default='app_env')
parser.add_argument('--env-pos', type=int, default=0)
return parser
def main():
parser: ArgumentParser = build_parser()
args: Namespace = parser.parse_args()
name: str = f'{args.target}.{args.extension}'
target: Path = find_target(name)
# TODO: add error handling
if not target:
print('could not find project root')
return
config_data: Dict[str, str] = load_config(target)
infer_data: Dict[str, str] = infer_params(project_dir=target.parent, app_env_var=args.env_var, app_env_pos=args.env_pos)
data: Dict[str, str] = {**config_data, **infer_data}
if not data[args.env_var] or not data['construct']:
print('could not determine environment or construct')
return
output: str = build_output(data, key=args.key, component=args.component, prefix=args.prefix, suffix=args.suffix,
bucket_name=args.bucket_name, table_name=args.table_name, state_name=args.state_name,
app_env_var=args.env_var)
if output:
print(output, end='')
| [
"argparse.ArgumentParser"
] | [((220, 236), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (234, 236), False, 'from argparse import ArgumentParser, Namespace\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from django import forms
from django.core.mail import send_mail
from django.template import Context
from ProyectoDeGrado.settings import MEDIA_ROOT
from apps.administrador.models import *
from django.contrib.auth.models import User
class PerfilForm(forms.ModelForm):
class Meta:
model = Perfil
fields = ['usuario', 'avatar', 'sede', 'codigo', 'carrera']
widgets = {
'usuario': forms.Select(attrs={'class':'selectpicker', 'disabled':'disabled', 'data-width':'100%', 'data-live-search':'true','data-container':'body'}),
'avatar':forms.FileInput(attrs={'class':'file'}),
'sede':forms.Select(attrs={'class':'selectpicker', 'data-width':'100%', 'data-live-search':'true','data-container':'body'}),
'codigo':forms.TextInput(attrs={'class':'form-control'}),
'carrera':forms.SelectMultiple(attrs={'class':'selectpicker', 'data-width':'100%', 'data-live-search':'true','data-container':'body','title':'Seleccione sus carreras'})
}
def __init__(self, *args, **kwargs):
super(PerfilForm, self).__init__(*args, **kwargs)
        self.fields['sede'].empty_label = "Seleccione su sede"
def enviar(self, data):
link = "http://www.repositio.com/activate/"+data['username']+"/"+ data['activation_key']
message = "Continue con el proceso de registro por medio de este link "+link
send_mail(data['email_subject'], message, 'Repositio <<EMAIL>>', [data['username']+data['dominio']],fail_silently=False)
def save(self, data):
perfil = Perfil()
usuario = User.objects.get(username = data['username'])
perfil.usuario = usuario
perfil.activation_key=data['activation_key']
perfil.key_expires=datetime.datetime.strftime(datetime.datetime.now() + datetime.timedelta(days=2), "%Y-%m-%d %H:%M:%S")
perfil.save()
return perfil
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ['username', 'password', 'email', 'first_name', 'last_name']
widgets = {
'username': forms.TextInput(attrs={'class':'form-control'}),
'password': forms.PasswordInput(attrs={'class':'form-control'}),
'email': forms.EmailInput(attrs={'class':'form-control', 'disabled':'disabled'}),
'first_name': forms.TextInput(attrs={'class':'form-control'}),
'last_name': forms.TextInput(attrs={'class':'form-control'}),
}
def save(self, data):
usuario = User()
usuario.username = data['username']
usuario.email = data['username']+data['dominio']
usuario.save()
return usuario
| [
"django.contrib.auth.models.User",
"django.core.mail.send_mail",
"django.forms.Select",
"django.forms.PasswordInput",
"django.forms.FileInput",
"datetime.datetime.now",
"django.forms.SelectMultiple",
"django.forms.EmailInput",
"django.forms.TextInput",
"datetime.timedelta",
"django.contrib.auth.models.User.objects.get"
] | [((1468, 1596), 'django.core.mail.send_mail', 'send_mail', (["data['email_subject']", 'message', '"""Repositio <<EMAIL>>"""', "[data['username'] + data['dominio']]"], {'fail_silently': '(False)'}), "(data['email_subject'], message, 'Repositio <<EMAIL>>', [data[\n 'username'] + data['dominio']], fail_silently=False)\n", (1477, 1596), False, 'from django.core.mail import send_mail\n'), ((1660, 1703), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': "data['username']"}), "(username=data['username'])\n", (1676, 1703), False, 'from django.contrib.auth.models import User\n'), ((2584, 2590), 'django.contrib.auth.models.User', 'User', ([], {}), '()\n', (2588, 2590), False, 'from django.contrib.auth.models import User\n'), ((482, 636), 'django.forms.Select', 'forms.Select', ([], {'attrs': "{'class': 'selectpicker', 'disabled': 'disabled', 'data-width': '100%',\n 'data-live-search': 'true', 'data-container': 'body'}"}), "(attrs={'class': 'selectpicker', 'disabled': 'disabled',\n 'data-width': '100%', 'data-live-search': 'true', 'data-container': 'body'}\n )\n", (494, 636), False, 'from django import forms\n'), ((644, 684), 'django.forms.FileInput', 'forms.FileInput', ([], {'attrs': "{'class': 'file'}"}), "(attrs={'class': 'file'})\n", (659, 684), False, 'from django import forms\n'), ((704, 829), 'django.forms.Select', 'forms.Select', ([], {'attrs': "{'class': 'selectpicker', 'data-width': '100%', 'data-live-search': 'true',\n 'data-container': 'body'}"}), "(attrs={'class': 'selectpicker', 'data-width': '100%',\n 'data-live-search': 'true', 'data-container': 'body'})\n", (716, 829), False, 'from django import forms\n'), ((843, 891), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (858, 891), False, 'from django import forms\n'), ((914, 1087), 'django.forms.SelectMultiple', 'forms.SelectMultiple', ([], {'attrs': "{'class': 'selectpicker', 'data-width': '100%', 'data-live-search': 'true',\n 'data-container': 'body', 'title': 'Seleccione sus carreras'}"}), "(attrs={'class': 'selectpicker', 'data-width': '100%',\n 'data-live-search': 'true', 'data-container': 'body', 'title':\n 'Seleccione sus carreras'})\n", (934, 1087), False, 'from django import forms\n'), ((2160, 2208), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2175, 2208), False, 'from django import forms\n'), ((2233, 2285), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2252, 2285), False, 'from django import forms\n'), ((2307, 2380), 'django.forms.EmailInput', 'forms.EmailInput', ([], {'attrs': "{'class': 'form-control', 'disabled': 'disabled'}"}), "(attrs={'class': 'form-control', 'disabled': 'disabled'})\n", (2323, 2380), False, 'from django import forms\n'), ((2406, 2454), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2421, 2454), False, 'from django import forms\n'), ((2480, 2528), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2495, 2528), False, 'from django import forms\n'), ((1846, 1869), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1867, 1869), False, 'import datetime\n'), ((1872, 1898), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(2)'}), '(days=2)\n', (1890, 1898), False, 'import datetime\n')] |
import numpy as np
import matplotlib.pyplot as plt
plt.interactive(False)
import traces_CHR as mtc
import basic_fun as bf
import fields_CHR as mfd
import globals4simulation as tng
import pickle
import os
# ----
def take_snap_shot(k, stack_loc):
# k is time_step_index
base_name = str(int((k + 1) / tng.n_hr)).zfill(4)
png_path = os.path.join(stack_loc, 'AHL_' + base_name + '.png')
qS = mfd.AHLs.sum(axis=0)
    # also save the raw ndarray; it takes more space but keeps the option to select a value range later
np_path = os.path.join(stack_loc, f'raw_AHL_{base_name}.npy')
np.save(np_path, qS)
def gen_summary_fig(save_impath=None):
# generate summary after all conditions are simulated
print('generating report')
fig, axs = plt.subplots(2,3)
for i, result_loc in enumerate(tng.result_locs):
# get the pickle, and read AHL field
pickle_path = os.path.join(result_loc, tng.pickle_fname)
with open(pickle_path, 'rb') as f:
# graph 1
fields, traces, old_nstep_end = pickle.load(f)
AHL_plot = fields[4]
bf.show_field(AHL_plot.sum(axis=0), fig=fig,
ax=axs[i, 0], log = True) # , vmax=1, vmin=0
# graph 2
if tng.flag_record:
repressors, qIs, AiiA, qSs, mC = traces
t = mtc.t
bf.show_plot(t, qSs[0],ax = axs[i,1], log = True)
# graph 3
all_AHL = AHL_plot.sum(axis=0)
ahl_line = all_AHL[tng.r0,:]
x = range(len(ahl_line))
bf.show_plot(x, ahl_line, ax = axs[i,2], vmin = 0, log = False)
# bf.show_plot(t, qIs[0],ax = axs[i,2], log = True)
if save_impath != None:
plt.savefig(save_impath, dpi=300)
os.startfile(save_impath)
def save_fields(pickle_path):
# save simulation raw data
fields = [mfd.strain_id_map, mfd.repressors, mfd.qIs, mfd.aiiA, mfd.AHLs, mfd.mC]
traces = [mtc.repressors, mtc.qIs, mtc.AiiA, mtc.AHLs, mtc.mC]
number = tng.nstep_end
with open(pickle_path, 'wb') as f:
pickle.dump([fields, traces, number], f) | [
"matplotlib.pyplot.savefig",
"pickle.dump",
"fields_CHR.AHLs.sum",
"os.path.join",
"pickle.load",
"basic_fun.show_plot",
"matplotlib.pyplot.interactive",
"os.startfile",
"matplotlib.pyplot.subplots",
"numpy.save"
] | [((53, 75), 'matplotlib.pyplot.interactive', 'plt.interactive', (['(False)'], {}), '(False)\n', (68, 75), True, 'import matplotlib.pyplot as plt\n'), ((360, 412), 'os.path.join', 'os.path.join', (['stack_loc', "('AHL_' + base_name + '.png')"], {}), "(stack_loc, 'AHL_' + base_name + '.png')\n", (372, 412), False, 'import os\n'), ((425, 445), 'fields_CHR.AHLs.sum', 'mfd.AHLs.sum', ([], {'axis': '(0)'}), '(axis=0)\n', (437, 445), True, 'import fields_CHR as mfd\n'), ((536, 587), 'os.path.join', 'os.path.join', (['stack_loc', 'f"""raw_AHL_{base_name}.npy"""'], {}), "(stack_loc, f'raw_AHL_{base_name}.npy')\n", (548, 587), False, 'import os\n'), ((593, 613), 'numpy.save', 'np.save', (['np_path', 'qS'], {}), '(np_path, qS)\n', (600, 613), True, 'import numpy as np\n'), ((765, 783), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {}), '(2, 3)\n', (777, 783), True, 'import matplotlib.pyplot as plt\n'), ((906, 948), 'os.path.join', 'os.path.join', (['result_loc', 'tng.pickle_fname'], {}), '(result_loc, tng.pickle_fname)\n', (918, 948), False, 'import os\n'), ((1781, 1814), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_impath'], {'dpi': '(300)'}), '(save_impath, dpi=300)\n', (1792, 1814), True, 'import matplotlib.pyplot as plt\n'), ((1824, 1849), 'os.startfile', 'os.startfile', (['save_impath'], {}), '(save_impath)\n', (1836, 1849), False, 'import os\n'), ((2149, 2189), 'pickle.dump', 'pickle.dump', (['[fields, traces, number]', 'f'], {}), '([fields, traces, number], f)\n', (2160, 2189), False, 'import pickle\n'), ((1061, 1075), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1072, 1075), False, 'import pickle\n'), ((1612, 1670), 'basic_fun.show_plot', 'bf.show_plot', (['x', 'ahl_line'], {'ax': 'axs[i, 2]', 'vmin': '(0)', 'log': '(False)'}), '(x, ahl_line, ax=axs[i, 2], vmin=0, log=False)\n', (1624, 1670), True, 'import basic_fun as bf\n'), ((1400, 1447), 'basic_fun.show_plot', 'bf.show_plot', (['t', 'qSs[0]'], {'ax': 'axs[i, 1]', 'log': '(True)'}), '(t, qSs[0], ax=axs[i, 1], log=True)\n', (1412, 1447), True, 'import basic_fun as bf\n')] |
# -*- coding: UTF-8 -*-
import numpy as np
from caid.cad_geometry import square as domain
from caid.core.bspline import bsp
# ------------------------------
def findSpan(list_x, list_t, list_p, list_n):
x = list_x[0]
y = list_x[1]
tx = list_t[0]
ty = list_t[1]
px = list_p[0]
py = list_p[1]
nx = list_n[0]
ny = list_n[1]
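    # bsp.FindSpan returns the knot-span index of the point; subtracting the
    # degree gives the index of the first B-spline basis function active there.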
leftmkx = bsp.FindSpan(px,tx,x) - px
leftmky = bsp.FindSpan(py,ty,y) - py
return leftmkx,leftmky
# ------------------------------
# ------------------------------
def evalSplines(list_x, list_t, list_p, list_n):
x = list_x[0]
y = list_x[1]
tx = list_t[0]
ty = list_t[1]
px = list_p[0]
py = list_p[1]
nx = list_n[0]
ny = list_n[1]
Nx = np.zeros(px+1)
Ny = np.zeros(py+1)
Nx[:] = bsp.EvalBasisFuns(px,tx,x)
Ny[:] = bsp.EvalBasisFuns(py,ty,y)
return Nx,Ny
# ------------------------------
# ------------------------------
def getSites(Nx,Ny):
    # TODO: to be changed for the SL
"""
generates the current position of our sites
"""
x = np.linspace(0., 1., Nx)
y = np.linspace(0., 1., Ny)
X,Y = np.meshgrid(x,y)
return X,Y
# ------------------------------
# ------------------------------
def assembleM(X, Y, list_t, list_p, list_n):
"""
"""
n,m = X.shape
nx = list_n[0]
ny = list_n[1]
px = list_p[0]
py = list_p[1]
# initialize the matrix M of size (nx ny, n m)
M = np.zeros((n*m, nx*ny))
for i in range(0,n):
for j in range(0,m):
I = j * n + i
x = X[i,j] ; y = Y[i,j]
Nx, Ny = evalSplines([x,y], list_t, list_p, list_n)
leftmkx, leftmky = findSpan ([x,y], list_t, list_p, list_n)
for ip in range(0, px+1):
for jp in range(0, py+1):
# J = (jp + leftmky) * nx + ip + leftmkx
J = (ip + leftmkx) * ny + jp + leftmky
M[I,J] = Nx[ip] * Ny[jp]
return M
# ------------------------------
# ------------------------------
def getCoefC1Constraints(face, list_t, list_p, list_n):
# ...
# p : degree of the slave domain current curve
# d : denominator for the slave domain current curve
if face in [1,3]:
axis = 1
if face in [2,4]:
axis = 0
if face in [1,2]:
p = list_p[axis]
d = list_t[axis][p+1]
if face in [3,4]:
p = list_p[axis]
d = list_t[axis][-1] - list_t[axis][-(p+1+1)]
c = p / d
return c
# ...
# ------------------------------
# ------------------------------
def getC1Constraints(face, list_t, list_p, list_n):
c = getCoefC1Constraints(face, list_t, list_p, list_n)
nx = list_n[0]
ny = list_n[1]
list_v = []
for i in range(0, nx):
V = np.zeros((nx,ny))
if face == 1:
V[i,0] = -c
V[i,1] = c
list_v.append(V.reshape(nx*ny))
if face == 3:
V[i,-1] = -c
V[i,-2] = c
list_v.append(V.reshape(nx*ny))
for j in range(0, ny):
V = np.zeros((nx,ny))
if face == 2:
V[0,j] = -c
V[1,j] = c
list_v.append(V.reshape(nx*ny))
if face == 4:
V[-1,j] = -c
V[-2,j] = c
list_v.append(V.reshape(nx*ny))
return list_v
# ------------------------------
#-----------------------------------
class surfint(object):
def __init__(self, geometry, space=None, constraints=[]):
"""
        initialize the surfint object
PDE is the Differential operator to use for smoothing (usually a 2nd
order)
constraints is a list of dictionaries that must be of the following form
constraints[i] is {'patch_id_m', 'face_m', 'patch_id_s', 'face_s',
'type'}
patch_id_m is the master patch id
face_m is the face id in the master patch
patch_id_s is the slave patch id
face_s is the face id in the slave patch
type is the constraint's type: C1, C2, ... (default: C1)
ib is the starting index in the face element (default:0 )
ie is the ending index in the face element (default:-1 )
"""
self.geometry = geometry
self.postAssembly = False
self.nConstraints = 0
self.ConstIndices = []
self.ConstVals = []
self.constraints = constraints
# space may be None
self.V = space
# @property
# def system(self):
# return self.PDE.system.get()
#
# @property
# def space(self):
# return self.PDE.space
def AssembleLocalMatrix(self, nrb):
list_t = nrb.knots
list_p = nrb.degree
list_n = nrb.shape
nx = list_n[0]
ny = list_n[1]
n,m = list_n
X,Y = getSites(n,m)
from scipy.sparse import csr_matrix
A = csr_matrix(assembleM(X, Y, list_t, list_p, list_n))
return A
def interpolate(self, f):
nrb = self.geometry[0]
list_t = nrb.knots
list_p = nrb.degree
list_n = nrb.shape
nx = list_n[0]
ny = list_n[1]
n,m = list_n
X,Y = getSites(n,m)
M = self.AssembleLocalMatrix(nrb)
list_faces = []
list_v = []
for face in list_faces:
list_v += getC1Constraints(face, list_t, list_p, list_n)
F = f(X,Y).reshape(n*m)
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import splu
# ...
A = np.zeros((nx*ny,nx*ny))
A[:n*m, :nx*ny] = M.todense()
nConstraints = len(list_v)
assert(nx*ny-n*m==nConstraints)
for i,v in enumerate(list_v):
A[n*m+i, :] = v
# ...
# ...
B = np.zeros(nx*ny)
B[:n*m] = F
# ...
A_ = csr_matrix(A)
A_op = splu(A_.tocsc())
y = A_op.solve(B)
return y.reshape((nx,ny))
# ----------------------------------------------------------
if __name__ == '__main__':
sin = np.sin; cos = np.cos ; pi = np.pi
# f = lambda x,y : sin ( 2*pi*x ) * sin ( 4*pi*y )
# dxf = lambda x,y : 2*pi * cos ( 2*pi*x ) * sin ( 4*pi*y )
# dyf = lambda x,y : 4*pi * sin ( 2*pi*x ) * cos ( 4*pi*y )
f = lambda x,y : 0.5 * (x**2 + y**2)
dxf = lambda x,y : x
dyf = lambda x,y : y
from caid.cad_geometry import square as domain
geo = domain(n=[31,31], p=[2,2])
interpolator = surfint(geo)
y = interpolator.interpolate(f)
# list_t = nrb.knots
# list_p = nrb.degree
# list_n = nrb.shape
#
# nx = list_n[0]
# ny = list_n[1]
#
# n,m = list_n
## n = nx-1 ; m = ny
# X,Y = getSites(n,m)
#
# M = assembleM(X, Y, list_t, list_p, list_n)
#
## list_faces = [1]
# list_faces = []
# list_v = []
# for face in list_faces:
# list_v += getC1Constraints(face, list_t, list_p, list_n)
#
# F = f(X,Y).reshape(n*m)
#
# from scipy.sparse import *
# from scipy.sparse.linalg import spsolve
#
# # ...
# A = np.zeros((nx*ny,nx*ny))
# A[:n*m, :nx*ny] = M
# nConstraints = len(list_v)
#
# print "shape M = ", M.shape
# print "shape A = ", A.shape
# print "nConstraints = ", nConstraints
#
# print nx*ny-n*m
# print nConstraints
# assert(nx*ny-n*m==nConstraints)
# for i,v in enumerate(list_v):
# A[n*m+i, :] = v
# # ...
#
# # ...
# b = dyf(X[:,0], Y[:,0])
# print "b.shape = ", b.shape
# print "expected = ", nConstraints
# # ...
#
# # ...
# B = np.zeros(nx*ny)
# B[:n*m] = F
# try:
# B[n*m:] = b # TODO a mettre les valeurs des derivees
# except:
# pass
# # ...
#
# A_ = csr_matrix(A)
# y = spsolve(A_, B)
import pylab as pl
pl.contourf(y) ; pl.colorbar() ; pl.show()
| [
"pylab.contourf",
"caid.core.bspline.bsp.FindSpan",
"caid.core.bspline.bsp.EvalBasisFuns",
"numpy.zeros",
"caid.cad_geometry.square",
"numpy.linspace",
"pylab.colorbar",
"numpy.meshgrid",
"scipy.sparse.csr_matrix",
"pylab.show"
] | [((747, 763), 'numpy.zeros', 'np.zeros', (['(px + 1)'], {}), '(px + 1)\n', (755, 763), True, 'import numpy as np\n'), ((771, 787), 'numpy.zeros', 'np.zeros', (['(py + 1)'], {}), '(py + 1)\n', (779, 787), True, 'import numpy as np\n'), ((799, 827), 'caid.core.bspline.bsp.EvalBasisFuns', 'bsp.EvalBasisFuns', (['px', 'tx', 'x'], {}), '(px, tx, x)\n', (816, 827), False, 'from caid.core.bspline import bsp\n'), ((838, 866), 'caid.core.bspline.bsp.EvalBasisFuns', 'bsp.EvalBasisFuns', (['py', 'ty', 'y'], {}), '(py, ty, y)\n', (855, 866), False, 'from caid.core.bspline import bsp\n'), ((1077, 1102), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'Nx'], {}), '(0.0, 1.0, Nx)\n', (1088, 1102), True, 'import numpy as np\n'), ((1109, 1134), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'Ny'], {}), '(0.0, 1.0, Ny)\n', (1120, 1134), True, 'import numpy as np\n'), ((1143, 1160), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1154, 1160), True, 'import numpy as np\n'), ((1458, 1484), 'numpy.zeros', 'np.zeros', (['(n * m, nx * ny)'], {}), '((n * m, nx * ny))\n', (1466, 1484), True, 'import numpy as np\n'), ((6474, 6502), 'caid.cad_geometry.square', 'domain', ([], {'n': '[31, 31]', 'p': '[2, 2]'}), '(n=[31, 31], p=[2, 2])\n', (6480, 6502), True, 'from caid.cad_geometry import square as domain\n'), ((7820, 7834), 'pylab.contourf', 'pl.contourf', (['y'], {}), '(y)\n', (7831, 7834), True, 'import pylab as pl\n'), ((7837, 7850), 'pylab.colorbar', 'pl.colorbar', ([], {}), '()\n', (7848, 7850), True, 'import pylab as pl\n'), ((7853, 7862), 'pylab.show', 'pl.show', ([], {}), '()\n', (7860, 7862), True, 'import pylab as pl\n'), ((372, 395), 'caid.core.bspline.bsp.FindSpan', 'bsp.FindSpan', (['px', 'tx', 'x'], {}), '(px, tx, x)\n', (384, 395), False, 'from caid.core.bspline import bsp\n'), ((413, 436), 'caid.core.bspline.bsp.FindSpan', 'bsp.FindSpan', (['py', 'ty', 'y'], {}), '(py, ty, y)\n', (425, 436), False, 'from caid.core.bspline import bsp\n'), ((2817, 2835), 'numpy.zeros', 'np.zeros', (['(nx, ny)'], {}), '((nx, ny))\n', (2825, 2835), True, 'import numpy as np\n'), ((3105, 3123), 'numpy.zeros', 'np.zeros', (['(nx, ny)'], {}), '((nx, ny))\n', (3113, 3123), True, 'import numpy as np\n'), ((5586, 5614), 'numpy.zeros', 'np.zeros', (['(nx * ny, nx * ny)'], {}), '((nx * ny, nx * ny))\n', (5594, 5614), True, 'import numpy as np\n'), ((5831, 5848), 'numpy.zeros', 'np.zeros', (['(nx * ny)'], {}), '(nx * ny)\n', (5839, 5848), True, 'import numpy as np\n'), ((5895, 5908), 'scipy.sparse.csr_matrix', 'csr_matrix', (['A'], {}), '(A)\n', (5905, 5908), False, 'from scipy.sparse import csr_matrix\n')] |
"""
Plot the number of publications against the number of tools for each clustered
repository (and for each cluster within a repository), with marker sizes scaled
by the average citation count per publication.
"""
import numpy as np
from numpy import std
import os
import sys
import pandas as pd
from scipy.stats import ttest_rel, ttest_ind, pearsonr, ttest_1samp
from statistics import mean
from math import sqrt
from t_test_clustered_data import get_sorted_clusters, get_vectors, get_clusters, CLUSTERED_FILENAME_POSFIX, get_repo_name
from t_test_clustered_data import get_clusters
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
from plot_pubs_in_clusters import get_color
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import *
from plot_gain_scores import get_cluster_label
PUBLICATION_ID_COLUMN = "PublicationID"
TOOLS_COLUMN = "Tools"
TOOLS_SEPARATOR = ";"
# This list is defined so to minimize using very similar markers as much as possible.
MARKERS = ["o", "^", "x", "v", "1", "2", "3", "4", ">", "<", "*", "P", "+", "D", "X", "d"]
# It is certainly a bad practice to hard-code such information. However, without such
# manipulations annotations may overlap, and matplotlib does not offer any feature
# out-of-box to address it. There are some open-source libraries developed to address
# the overlapping annotation issue (e.g., https://github.com/Phlya/adjustText); however,
# their output was not satisfactory/elegant.
# Therefore, this hard-coded modifications is used as a hacky/temporary workaround.
OFFSETS = {"Bioconda": (50,0), "Bioconductor": (-45, 0), "BioTools": (-50, 0), "ToolShed": (0, -35)}
def get_marker(i):
if i<len(MARKERS):
return MARKERS[i]
else:
# TODO: there should be a better alternative.
return "."
def get_clustered_repositories(input_path):
filenames = []
repositories = []
for root, dirpath, files in os.walk(input_path):
for filename in files:
if os.path.splitext(filename)[1] == ".csv" and \
os.path.splitext(filename)[0].endswith(CLUSTERED_FILENAME_POSFIX):
filenames.append(os.path.join(root, filename))
repositories.append(get_repo_name(filename))
return filenames, repositories
def get_citations_count(tools):
_, pre_citations_vectors, post_citations_vectors, _, _, _, delta = get_vectors(tools)
pre_citations = []
for citation in pre_citations_vectors:
pre_citations.append(np.max(citation))
post_citations = []
for citation in post_citations_vectors:
post_citations.append(np.max(citation))
return pre_citations, post_citations
def get_pub_tool_count(filename):
"""
Returns the number of unique tools and publications in each
cluster of the given repository filename.
"""
clusters = get_clusters(filename)
pubs = {}
tools = {}
for k in clusters.groups:
if k not in pubs:
pubs[k] = {}
tools[k] = {}
for index, row in clusters.get_group(k).iterrows():
pub_id = row.get(PUBLICATION_ID_COLUMN)
if pub_id not in pubs[k]:
pubs[k][pub_id] = 0
tool_names = (row.get(TOOLS_COLUMN)).split(TOOLS_SEPARATOR)
for name in tool_names:
if name not in tools[k]:
tools[k][name] = 0
cluster_pubs_count = {}
for k in pubs:
cluster_pubs_count[k] = len(pubs[k])
cluster_tools_count = {}
for k in tools:
cluster_tools_count[k] = len(tools[k])
return sum(cluster_pubs_count.values()), cluster_pubs_count, sum(cluster_tools_count.values()), cluster_tools_count
def plot_clustered(input_path, filenames, repositories):
fig, ax = set_plot_style(1, 1)
i = 0
max_x = 0
max_y = 0
repo_scatter = {}
cluster_scatter = {}
add_cluster_scatter = True
for filename in filenames:
add_repo_scatter = True
c_pubs, ck_pubs, c_tools, ck_tools = get_pub_tool_count(filename)
cluster_count = len(ck_pubs.keys())
j = 0
for k in ck_pubs:
max_x = max(max_x, ck_pubs[k])
max_y = max(max_y, ck_tools[k])
scatter = ax.scatter(ck_pubs[k], ck_tools[k], marker=get_marker(j), color=get_color(i), alpha=0.5, s=80)
if add_repo_scatter:
repo_scatter[get_repo_name(filename)] = scatter
add_repo_scatter = False
if add_cluster_scatter:
cluster_scatter[get_cluster_label(cluster_count, k)] = scatter
j += 1
add_cluster_scatter = False
i += 1
# The default range of plt when `s` is set in the `scatter`
# method does not keep all the points in the canvas; so their
# values are overridden.
ax.set_ylim(bottom=0.5, top=max_y + (max_y * 0.5))
ax.set_xlim(left=0.5, right=max_x + (max_x * 0.5))
ax.set_yscale('log')
ax.set_xscale('log')
ax.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%d'))
ax.xaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%d'))
ax.set_xlabel("\nPublications Count")
ax.set_ylabel("Tools Count\n")
# It is required to add legend through `add_artist` for it not be overridden by the second legend.
l1 = ax.legend(repo_scatter.values(), repo_scatter.keys(), scatterpoints=1, loc='lower right', ncol=2, title="Repositories")
ax.add_artist(l1)
l2 = ax.legend(cluster_scatter.values(), cluster_scatter.keys(), scatterpoints=1, loc='upper left', ncol=2, title="Clusters")
image_file = os.path.join(input_path, 'plot_pub_tool_clustered.png')
if os.path.isfile(image_file):
os.remove(image_file)
plt.savefig(image_file, bbox_inches='tight')
plt.close()
def plot(input_path, filenames, repositories):
fig, ax = set_plot_style(1, 1)
i = 0
max_x = 0
max_y = 0
repo_scatter = {}
cluster_scatter = {}
add_cluster_scatter = True
xs = []
ys = []
zs = []
for filename in filenames:
repo_color = get_color(i)
add_repo_scatter = True
c_pubs, _, c_tools, _ = get_pub_tool_count(filename)
max_x = max(max_x, c_pubs)
max_y = max(max_y, c_tools)
tools = pd.read_csv(filename, header=0, sep='\t')
pre_citations, post_citations = get_citations_count(tools)
xs.append(c_pubs)
ys.append(c_tools)
# it is multiplied by 2 so to make it a bit bigger on the plot so it can
# be seen more easily.
z = ((sum(pre_citations) + sum(post_citations)) / c_pubs) * 2
zs.append(z)
scatter = ax.scatter(c_pubs, c_tools, color=repo_color, alpha=0.5, s=z)
repo_name = get_repo_name(filename)
z_str = '{0:.1f}'.format(z / 2.0)
ax.annotate(\
f"{repo_name}\n({c_pubs}, {c_tools}, {z_str})", \
xy=(c_pubs, c_tools), \
color=repo_color,
textcoords="offset points", \
xytext=OFFSETS[repo_name], \
ha='center', \
#arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0.95', color=repo_color)
)
repo_scatter[repo_name] = scatter
i += 1
print(repo_name)
print(f"\tpubs:\t{c_pubs}")
print(f"\ttools:\t{c_tools}")
print(f"\tcitations:\t{sum(pre_citations) + sum(post_citations)}")
#for x,y in zip(xs,ys):
# plt.annotate(f"({x}, {y})", # Label
# (x,y),
# textcoords="offset points", # how to position the text
# xytext=(0,10), # distance from text to points (x,y)
# ha='center') # horizontal alignment can be left, right or center
# The default range of plt when `s` is set in the `scatter`
# method does not keep all the points in the canvas; so their
# values are overridden.
ax.set_ylim(bottom=128, top=max_y + (max_y * 0.5))
ax.set_xlim(left=128, right=max_x + (max_x * 0.5))
ax.set_xscale('log', basex=2)
ax.set_yscale('log', basey=2)
ax.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%d'))
ax.xaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%d'))
ax.set_xlabel("\nPublications Count")
ax.set_ylabel("Tools Count\n")
# It is required to add legend through `add_artist` for it not be overridden by the second legend.
#ax.legend(repo_scatter.values(), repo_scatter.keys(), scatterpoints=1, loc='upper left', ncol=2)
#ax.add_artist(l1)
#l2 = ax.legend(cluster_scatter.values(), cluster_scatter.keys(), scatterpoints=1, loc='upper left', ncol=2, title="Clusters")
image_file = os.path.join(input_path, 'plot_pub_tool.png')
if os.path.isfile(image_file):
os.remove(image_file)
plt.savefig(image_file, bbox_inches='tight')
plt.close()
def set_plot_style(nrows, ncols, fig_height=5, fig_width=6):
sns.set()
sns.set_context("paper")
sns.set_style("darkgrid")
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(fig_width, fig_height), dpi=600)
plt.subplots_adjust(wspace=0.2, hspace=0.2)
return fig, axes
def run(input_path):
filenames, repositories = get_clustered_repositories(input_path)
plot(input_path, filenames, repositories)
plot_clustered(input_path, filenames, repositories)
| [
"pandas.read_csv",
"seaborn.set_style",
"t_test_clustered_data.get_clusters",
"os.walk",
"os.remove",
"seaborn.set",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.pyplot.savefig",
"seaborn.set_context",
"os.path.splitext",
"os.path.isfile",
"t_test_clustered_data.get_vectors",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.pyplot.subplots_adjust",
"plot_gain_scores.get_cluster_label",
"os.path.join",
"plot_pubs_in_clusters.get_color",
"matplotlib.pyplot.subplots",
"t_test_clustered_data.get_repo_name"
] | [((1758, 1777), 'os.walk', 'os.walk', (['input_path'], {}), '(input_path)\n', (1765, 1777), False, 'import os\n'), ((2215, 2233), 't_test_clustered_data.get_vectors', 'get_vectors', (['tools'], {}), '(tools)\n', (2226, 2233), False, 'from t_test_clustered_data import get_sorted_clusters, get_vectors, get_clusters, CLUSTERED_FILENAME_POSFIX, get_repo_name\n'), ((2685, 2707), 't_test_clustered_data.get_clusters', 'get_clusters', (['filename'], {}), '(filename)\n', (2697, 2707), False, 'from t_test_clustered_data import get_clusters\n'), ((5455, 5510), 'os.path.join', 'os.path.join', (['input_path', '"""plot_pub_tool_clustered.png"""'], {}), "(input_path, 'plot_pub_tool_clustered.png')\n", (5467, 5510), False, 'import os\n'), ((5518, 5544), 'os.path.isfile', 'os.path.isfile', (['image_file'], {}), '(image_file)\n', (5532, 5544), False, 'import os\n'), ((5580, 5624), 'matplotlib.pyplot.savefig', 'plt.savefig', (['image_file'], {'bbox_inches': '"""tight"""'}), "(image_file, bbox_inches='tight')\n", (5591, 5624), True, 'import matplotlib.pyplot as plt\n'), ((5629, 5640), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5638, 5640), True, 'import matplotlib.pyplot as plt\n'), ((8558, 8603), 'os.path.join', 'os.path.join', (['input_path', '"""plot_pub_tool.png"""'], {}), "(input_path, 'plot_pub_tool.png')\n", (8570, 8603), False, 'import os\n'), ((8611, 8637), 'os.path.isfile', 'os.path.isfile', (['image_file'], {}), '(image_file)\n', (8625, 8637), False, 'import os\n'), ((8673, 8717), 'matplotlib.pyplot.savefig', 'plt.savefig', (['image_file'], {'bbox_inches': '"""tight"""'}), "(image_file, bbox_inches='tight')\n", (8684, 8717), True, 'import matplotlib.pyplot as plt\n'), ((8722, 8733), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8731, 8733), True, 'import matplotlib.pyplot as plt\n'), ((8801, 8810), 'seaborn.set', 'sns.set', ([], {}), '()\n', (8808, 8810), True, 'import seaborn as sns\n'), ((8815, 8839), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {}), "('paper')\n", (8830, 8839), True, 'import seaborn as sns\n'), ((8844, 8869), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (8857, 8869), True, 'import seaborn as sns\n'), ((8886, 8971), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'figsize': '(fig_width, fig_height)', 'dpi': '(600)'}), '(nrows=nrows, ncols=ncols, figsize=(fig_width, fig_height), dpi=600\n )\n', (8898, 8971), True, 'import matplotlib.pyplot as plt\n'), ((8971, 9014), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.2)', 'hspace': '(0.2)'}), '(wspace=0.2, hspace=0.2)\n', (8990, 9014), True, 'import matplotlib.pyplot as plt\n'), ((4853, 4895), 'matplotlib.ticker.FormatStrFormatter', 'matplotlib.ticker.FormatStrFormatter', (['"""%d"""'], {}), "('%d')\n", (4889, 4895), False, 'import matplotlib\n'), ((4930, 4972), 'matplotlib.ticker.FormatStrFormatter', 'matplotlib.ticker.FormatStrFormatter', (['"""%d"""'], {}), "('%d')\n", (4966, 4972), False, 'import matplotlib\n'), ((5554, 5575), 'os.remove', 'os.remove', (['image_file'], {}), '(image_file)\n', (5563, 5575), False, 'import os\n'), ((5929, 5941), 'plot_pubs_in_clusters.get_color', 'get_color', (['i'], {}), '(i)\n', (5938, 5941), False, 'from plot_pubs_in_clusters import get_color\n'), ((6123, 6164), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'header': '(0)', 'sep': '"""\t"""'}), "(filename, header=0, sep='\\t')\n", (6134, 6164), True, 'import pandas as pd\n'), ((6594, 6617), 't_test_clustered_data.get_repo_name', 'get_repo_name', (['filename'], {}), '(filename)\n', (6607, 6617), False, 'from t_test_clustered_data import get_sorted_clusters, get_vectors, get_clusters, CLUSTERED_FILENAME_POSFIX, get_repo_name\n'), ((7981, 8023), 'matplotlib.ticker.FormatStrFormatter', 'matplotlib.ticker.FormatStrFormatter', (['"""%d"""'], {}), "('%d')\n", (8017, 8023), False, 'import matplotlib\n'), ((8058, 8100), 'matplotlib.ticker.FormatStrFormatter', 'matplotlib.ticker.FormatStrFormatter', (['"""%d"""'], {}), "('%d')\n", (8094, 8100), False, 'import matplotlib\n'), ((8647, 8668), 'os.remove', 'os.remove', (['image_file'], {}), '(image_file)\n', (8656, 8668), False, 'import os\n'), ((2330, 2346), 'numpy.max', 'np.max', (['citation'], {}), '(citation)\n', (2336, 2346), True, 'import numpy as np\n'), ((2447, 2463), 'numpy.max', 'np.max', (['citation'], {}), '(citation)\n', (2453, 2463), True, 'import numpy as np\n'), ((1983, 2011), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (1995, 2011), False, 'import os\n'), ((2049, 2072), 't_test_clustered_data.get_repo_name', 'get_repo_name', (['filename'], {}), '(filename)\n', (2062, 2072), False, 'from t_test_clustered_data import get_sorted_clusters, get_vectors, get_clusters, CLUSTERED_FILENAME_POSFIX, get_repo_name\n'), ((4140, 4152), 'plot_pubs_in_clusters.get_color', 'get_color', (['i'], {}), '(i)\n', (4149, 4152), False, 'from plot_pubs_in_clusters import get_color\n'), ((4234, 4257), 't_test_clustered_data.get_repo_name', 'get_repo_name', (['filename'], {}), '(filename)\n', (4247, 4257), False, 'from t_test_clustered_data import get_sorted_clusters, get_vectors, get_clusters, CLUSTERED_FILENAME_POSFIX, get_repo_name\n'), ((4379, 4414), 'plot_gain_scores.get_cluster_label', 'get_cluster_label', (['cluster_count', 'k'], {}), '(cluster_count, k)\n', (4396, 4414), False, 'from plot_gain_scores import get_cluster_label\n'), ((1825, 1851), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1841, 1851), False, 'import os\n'), ((1883, 1909), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1899, 1909), False, 'import os\n')] |
from django.urls import path
from app import views
from rest_framework.urlpatterns import format_suffix_patterns
urlpatterns = [
path('api/blog', views.blog_list.as_view()),
path('api/blog/<int:pk>', views.blog_detail.as_view()),
path('api/blog/user/<int:pk>', views.User_Blog.as_view()),
]
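# format_suffix_patterns lets clients request a specific renderer via the URL,
# e.g. /api/blog.json in addition to /api/blog.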
urlpatterns = format_suffix_patterns(urlpatterns)
| [
"app.views.blog_list.as_view",
"app.views.blog_detail.as_view",
"rest_framework.urlpatterns.format_suffix_patterns",
"app.views.User_Blog.as_view"
] | [((321, 356), 'rest_framework.urlpatterns.format_suffix_patterns', 'format_suffix_patterns', (['urlpatterns'], {}), '(urlpatterns)\n', (343, 356), False, 'from rest_framework.urlpatterns import format_suffix_patterns\n'), ((152, 177), 'app.views.blog_list.as_view', 'views.blog_list.as_view', ([], {}), '()\n', (175, 177), False, 'from app import views\n'), ((210, 237), 'app.views.blog_detail.as_view', 'views.blog_detail.as_view', ([], {}), '()\n', (235, 237), False, 'from app import views\n'), ((275, 300), 'app.views.User_Blog.as_view', 'views.User_Blog.as_view', ([], {}), '()\n', (298, 300), False, 'from app import views\n')] |
import time
import numpy
import uuid
from datetime import timedelta
from gi.repository import Gtk, Gdk, Gio
from mxdc import Registry, IBeamline, Property, Object
from mxdc.devices.goniometer import GonioFeatures
from mxdc.devices.detector import DetectorFeatures
from mxdc.utils import gui, converter, datatools, glibref
from mxdc.utils.datatools import StrategyType, Strategy, ScreeningAngles, ScreeningRange
from mxdc.utils.gui import Validator
def skips(wedge, delta, first=1, start_angs=(0,), range_end=360):
"""
    Calculate the frame ranges to skip between collected wedges.
    :param wedge: angle range for each wedge
    :param delta: angle per frame
    :param first: first frame index
    :param start_angs: tuple of start angles, one per wedge
    :param range_end: end angle of the full collection range
    :return: string representation of frame number ranges to skip
"""
end_angs = numpy.array(start_angs)
start_angs = end_angs + wedge
end_angs[:-1] = end_angs[1:]
end_angs[-1] = range_end
starts = first + (start_angs/delta).astype(int)
ends = first + (end_angs/delta).astype(int) - 1
return ','.join((f'{lo}-{hi}' for lo,hi in zip(starts, ends)))
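# Worked example for skips() (values assumed for illustration, not from the
# source): wedge=45, delta=1, first=1, start_angs=(0, 90), range_end=180
# yields '46-90,136-180', i.e. the frame-number gaps between the two wedges.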
def calculate_skip(strategy, total_range, delta, first):
if strategy in [StrategyType.SCREEN_1, StrategyType.SCREEN_2, StrategyType.SCREEN_3, StrategyType.SCREEN_4]:
return skips(
wedge=total_range,
delta=delta,
first=first,
start_angs=ScreeningAngles[strategy],
range_end=ScreeningRange[strategy]
)
else:
return ''
class RunItem(Object):
class StateType:
(ADD, DRAFT, ACTIVE, PAUSED, ERROR, COMPLETE) = range(6)
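        # ADD marks the placeholder "Add run ..." row; real runs move from
        # DRAFT through ACTIVE/PAUSED to COMPLETE or ERROR.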
state = Property(type=int, default=StateType.DRAFT)
position = Property(type=int, default=0)
size = Property(type=int, default=0)
info = Property(type=object)
uuid = Property(type=str, default="")
progress = Property(type=float, default=0.0)
warning = Property(type=str, default="")
header = Property(type=str, default="")
title = Property(type=str, default="Add run ...")
duration = Property(type=int, default=0)
subtitle = Property(type=str, default="")
created = Property(type=float, default=0.0)
def __init__(self, info=None, state=StateType.DRAFT, uid=None, created=None):
super().__init__()
self.connect('notify::info', self.info_changed)
self.props.created = created if created else time.time()
self.props.uuid = uid if uid else str(uuid.uuid4())
self.props.state = state
self.props.info = info
def info_changed(self, *args, **kwargs):
if self.props.info:
self.props.size = datatools.count_frames(self.props.info)
self.props.header = '{} @ {:0.5g} keV'.format(
self.props.info.get('strategy_desc', ''),
self.props.info.get('energy'),
)
self.props.title = self.info['name']
self.props.subtitle = '{} frames, {:0.4g}°/{:0.2g}s{}{}'.format(
self.props.size, self.props.info.get('delta'), self.props.info.get('exposure'),
', {:g}° wedges'.format(self.props.info.get('wedge',720)) if self.props.info.get('wedge',720) < self.props.info.get('range') else '',
', [INV]' if self.props.info.get('inverse') else ''
)
self.props.duration = self.props.size * self.props.info.get('exposure')
def set_progress(self, progress):
state = self.props.state
if state == RunItem.StateType.ADD:
return False
self.props.progress = progress
if progress >= 0.95:
self.props.state = RunItem.StateType.COMPLETE
return state != self.props.state # return True if state changed
@staticmethod
def sorter(a_pointer, b_pointer):
        # if the objects are already translated, do not translate them again
if isinstance(a_pointer, RunItem):
a = a_pointer
b = b_pointer
else:
a = glibref.capi.to_object(a_pointer)
b = glibref.capi.to_object(b_pointer)
if a.props.state == b.props.state == RunItem.StateType.ADD:
return 0
elif a.props.state == RunItem.StateType.ADD:
return 1
elif b.props.state == RunItem.StateType.ADD:
return -1
else:
if a.props.created > b.props.created:
return 1
elif a.props.created < b.props.created:
return -1
else:
return 0
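        # Net effect: the ADD placeholder always sorts last; everything else
        # is ordered by creation time, oldest first.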
def get_color(self):
return Gdk.RGBA(*STATE_COLORS[self.state])
def __getitem__(self, item):
if self.props.info:
return self.props.info[item]
def __str__(self):
return '<Run Item: {} - {}|{}>'.format(self.props.position, self.props.title, self.props.subtitle)
STATE_COLORS = {
RunItem.StateType.ADD: (1.0, 1.0, 1.0, 0.0),
RunItem.StateType.DRAFT: (1.0, 1.0, 1.0, 0.0),
RunItem.StateType.ACTIVE: (1.0, 1.0, 0.0, 0.2),
RunItem.StateType.PAUSED: (1.0, 1.0, 0.0, 0.1),
RunItem.StateType.COMPLETE: (0.2, 1.0, 0.2, 0.5),
RunItem.StateType.ERROR: (1.0, 0.0, 0.5, 0.1),
}
STATE_PROPERTIES = {
RunItem.StateType.ADD: ('add-tool', 'list-add-symbolic'),
RunItem.StateType.DRAFT: ('draft-run', 'content-loading-symbolic'),
RunItem.StateType.ACTIVE: ('active-run', 'system-run-symbolic'),
RunItem.StateType.PAUSED: ('paused-run', 'media-playback-pause-symbolic'),
RunItem.StateType.COMPLETE: ('complete-run', 'object-select-symbolic'),
RunItem.StateType.ERROR: ('error-run', 'dialog-warning-symbolic'),
}
class DataForm(gui.FormManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# update converters based on beamline configuration
self.beamline = Registry.get_utility(IBeamline)
self.fields['energy'].set_converter(
Validator.Float(*self.beamline.config['energy_range'], self.beamline.config['default_energy'])
)
self.fields['distance'].set_converter(
Validator.Float(*self.beamline.config['distance_limits'], self.beamline.config['default_distance'])
)
self.exposure_rate = 1.
def on_change(self, field, event, name):
super().on_change(field, event, name)
if name == 'delta':
delta = self.get_value('delta')
exposure = delta / self.exposure_rate
self.set_value('exposure', exposure)
elif name == 'exposure':
exposure = self.get_value('exposure')
delta = self.get_value('delta')
self.exposure_rate = delta / exposure
elif name == 'energy':
# calculate resolution limits based on energy
energy = self.get_value('energy')
min_res = converter.dist_to_resol(
self.beamline.config['distance_limits'][0], self.beamline.detector.mm_size, energy
)
max_res = converter.dist_to_resol(
self.beamline.config['distance_limits'][1], self.beamline.detector.mm_size, energy
)
self.fields['resolution'].set_converter(
Validator.Float(min_res, max_res, default=2.0)
)
resolution = converter.dist_to_resol(self.get_value('distance'), self.beamline.detector.mm_size, energy)
self.set_value('resolution', resolution)
if name == 'resolution':
resolution = self.get_value('resolution')
energy = self.get_value('energy')
distance = converter.resol_to_dist(resolution, self.beamline.detector.mm_size, energy)
self.set_value('distance', distance)
if name == 'strategy':
strategy = self.get_value('strategy')
defaults = Strategy.get(strategy)
default_rate = self.beamline.config['default_delta'] / float(self.beamline.config['default_exposure'])
if 'delta' in defaults and 'exposure' not in defaults:
defaults['exposure'] = defaults['delta'] / default_rate
            elif 'exposure' in defaults and 'delta' not in defaults:
                # rate = delta / exposure, so delta = rate * exposure
                defaults['delta'] = default_rate * defaults['exposure']
self.exposure_rate = default_rate
inverse = self.get_field('inverse')
inverse.set_sensitive((strategy == StrategyType.FULL and 'inverse' not in self.disabled))
self.set_values(defaults)
if name == 'inverse':
inverse = self.get_value('inverse')
if inverse:
self.set_value('range', min(180., self.get_value('range')))
if name in ['delta', 'strategy', 'range', 'inverse']:
range_ = self.get_value('range')
inverse = self.get_value('inverse')
if inverse:
range_ = min(180., range_)
self.set_value('range', range_)
strategy = self.get_value('strategy')
delta = self.get_value('delta')
first = self.get_value('first')
skip = calculate_skip(strategy, range_, delta, first)
frames = datatools.calc_num_frames(strategy, delta, range_, skip=skip)
self.set_value('frames', frames)
class DataEditor(gui.BuilderMixin):
gui_roots = {
'data/data_form': ['data_form']
}
class Column:
ID, LABEL, VALUE = range(3)
Fields = (
gui.FieldSpec('resolution', 'entry', '{:0.3g}', Validator.Float(0.5, 50, 2.0)),
gui.FieldSpec('delta', 'entry', '{:0.3g}', Validator.AngleFrac(0.001, 720, 1.)),
gui.FieldSpec('range', 'entry', '{:0.4g}', Validator.Float(0.05, 10000, 1.)),
gui.FieldSpec('start', 'entry', '{:0.4g}', Validator.Float(-360., 360., 0.)),
gui.FieldSpec('wedge', 'entry', '{:0.4g}', Validator.Float(0.05, 720., 720.)),
gui.FieldSpec('energy', 'entry', '{:0.3f}', Validator.Float(1.0, 25.0, 12.66)),
gui.FieldSpec('distance', 'entry', '{:0.1f}', Validator.Float(50., 1000., 200)),
gui.FieldSpec('exposure', 'entry', '{:0.3g}', Validator.Float(0.001, 720., 0.5)),
gui.FieldSpec('attenuation', 'entry', '{:0.3g}', Validator.Float(0, 100, 0.0)),
gui.FieldSpec('first', 'entry', '{}', Validator.Int(1, 10000, 1)),
gui.FieldSpec('frames', 'entry', '{}', Validator.Int(1, 100000, 1)),
gui.FieldSpec('name', 'entry', '{}', Validator.Slug(30)),
gui.FieldSpec('strategy', 'cbox', '{}', Validator.Int(None, None, StrategyType.SINGLE)),
gui.FieldSpec('inverse', 'check', '{}', Validator.Bool(False)),
gui.FieldSpec('p0', 'mbox', '{}', Validator.Int(None, None)),
gui.FieldSpec('p1', 'mbox', '{}', Validator.Int(None, None)),
gui.FieldSpec('vector_size', 'spin', '{}', Validator.Int(1, 100, 10)),
)
disabled = ()
use_dialog = False
def __init__(self):
self.setup_gui()
self.beamline = Registry.get_utility(IBeamline)
self.points = Gtk.ListStore(int, str, object)
if not self.beamline.detector.supports(DetectorFeatures.WEDGING):
self.disabled += ('first', )
self.form = DataForm(self, fields=self.Fields, prefix='data', persist=False, disabled=self.disabled)
self.new_run = True
self.run_index = 0
self.item = None
self.item_links = []
self.handlers = {}
self.build_gui()
self.exposure_rate = 1.0
self.dir_template_btn.connect('clicked', self.on_dir_template)
def set_item(self, item):
if self.item:
for link in self.item_links:
self.item.handler_disconnect(link)
self.item = item
self.update()
self.item_links = [
self.item.connect('notify::state', self.update),
self.item.connect('notify::info', self.update),
]
def configure(self, info):
info['frames'] = datatools.count_frames(info)
info['distance'] = converter.resol_to_dist(info['resolution'], self.beamline.detector.mm_size, info['energy'])
min_res = converter.dist_to_resol(
self.beamline.config['distance_limits'][0], self.beamline.detector.mm_size, info['energy']
)
max_res = converter.dist_to_resol(
self.beamline.config['distance_limits'][1], self.beamline.detector.mm_size, info['energy']
)
self.form.fields['resolution'].set_converter(
Validator.Float(min_res, max_res, default=2.0)
)
defaults = self.get_default(info['strategy'])
defaults.update(info)
self.form.set_values(info)
# disable/enable inverse field
inverse = self.form.get_field('inverse')
strategy = self.form.get_value('strategy')
inverse.set_sensitive((strategy == StrategyType.FULL and 'inverse' not in self.disabled))
def get_parameters(self):
info = self.form.get_values()
        # Calculate the skip ranges and attach strategy metadata
info.update({
'skip': calculate_skip(info['strategy'], info['range'], info['delta'], info['first']),
'strategy_desc': Strategy[info['strategy']]['desc'],
'activity': Strategy[info['strategy']]['activity'],
})
return info
def get_default(self, strategy_type=StrategyType.SINGLE):
default = self.form.get_defaults()
info = Strategy[strategy_type]
delta, exposure = self.beamline.config['default_delta'], self.beamline.config['default_exposure']
rate = delta / float(exposure)
if 'delta' not in info:
info['delta'] = delta
if 'exposure' not in info:
info['exposure'] = info['delta'] / rate
default.update(info)
default['skip'] = calculate_skip(strategy_type, default['range'], default['delta'], default['first'])
default.update(Strategy[strategy_type])
default['strategy_desc'] = default.pop('desc')
return default
def build_gui(self):
strategy_field = self.form.get_field('strategy')
for id, params in list(Strategy.items()):
strategy_field.append(str(id), params['desc'])
def set_points(self, points):
self.points.clear()
self.points.append([0, '', None])
for i, point in enumerate(points):
self.points.append([i, 'P{}'.format(i + 1), tuple(point)])
def get_point(self, index):
for i, row in enumerate(self.points):
if i == index:
return row[self.Column.VALUE]
def on_dir_template(self, btn):
app = Gio.Application.get_default()
app.window.activate_action('preferences')
def update(self, *args, **kwargs):
if self.item.props.state == RunItem.StateType.ADD:
self.run_label.set_text('New Run')
self.data_delete_btn.set_sensitive(False)
self.data_copy_btn.set_sensitive(False)
self.data_recycle_btn.set_sensitive(False)
self.data_form.set_sensitive(False)
else:
self.run_label.set_text('Edit Run')
self.configure(self.item.info)
self.data_delete_btn.set_sensitive(True)
self.data_copy_btn.set_sensitive(True)
self.data_recycle_btn.set_sensitive(True)
self.data_form.set_sensitive(True)
def has_changed(self, new_values):
if self.item and self.item.info:
info = self.item.info
return any(v != new_values.get(k) for k, v in list(info.items()))
elif self.item:
return True
return False
class RunEditor(DataEditor):
def build_gui(self):
super().build_gui()
self.points.connect('row-changed', self.on_points_updated)
self.points.connect('row-deleted', self.on_points_updated)
self.points.connect('row-inserted', self.on_points_updated)
adjustment = Gtk.Adjustment(10, 1, 100, 1, 5, 0)
self.data_vector_size_spin.set_adjustment(adjustment)
self.data_p1_mbox.bind_property(
'active-id', self.data_vector_size_spin, 'sensitive', 0, lambda *args: bool(args[1])
)
# self.data_vector_size_spin.bind_property(
# 'sensitive', self.data_wedge_entry, 'sensitive', 0, lambda *args: not args[1]
# )
for i, name in enumerate(['p0', 'p1']):
field = self.form.get_field(name)
if not field: continue
renderer_text = Gtk.CellRendererText()
field.pack_start(renderer_text, True)
field.add_attribute(renderer_text, "text", self.Column.LABEL)
field.set_model(self.points)
field.set_id_column(self.Column.LABEL)
# field.connect('changed', self.sync_choices, choice_column)
def on_points_updated(self, *args, **kwargs):
num_points = len(self.points)
self.data_vector_box.set_sensitive(num_points > 1)
def configure(self, info):
super().configure(info)
# disable/enable point fields
num_points = len(self.points)
self.data_vector_box.set_sensitive(num_points > 1)
vector_size = self.form.get_field('vector_size')
if self.beamline.goniometer.supports(GonioFeatures.SCAN4D):
self.form.set_value('vector_size', 1)
vector_size.set_sensitive(False)
else:
vector_size.set_sensitive(True)
class DataDialog(DataEditor):
gui_roots = {
'data/data_dialog': ['data_dialog'],
'data/data_form': ['data_form_fields'],
}
disabled = ('name', 'inverse', 'energy')
use_dialog = True
def build_gui(self):
self.popover = self.data_dialog
self.content_box.pack_start(self.data_form_fields, True, True, 0)
super().build_gui()
self.data_cancel_btn.connect('clicked', lambda x: self.popover.hide())
self.data_save_btn.connect_after('clicked', lambda x: self.popover.hide())
class RunConfig(gui.Builder):
gui_roots = {
'data/data_form': ['saved_run_row']
}
ROW_SIZE_GROUP = Gtk.SizeGroup(Gtk.SizeGroupMode.VERTICAL)
def get_widget(self):
row = Gtk.ListBoxRow()
self.ROW_SIZE_GROUP.add_widget(row)
row.get_style_context().add_class('run-row')
row.add(self.saved_run_row)
self.data_duration_box.set_no_show_all(True)
self.update()
return row
def set_item(self, item):
self.item = item
for param in ['state', 'title', 'progress', 'subtitle', 'info', 'position']:
item.connect('notify::{}'.format(param), self.on_item_changed)
def on_item_changed(self, item, param):
self.update()
def update(self):
style_context = self.saved_run_row.get_style_context()
for state, (style_class, icon_name) in STATE_PROPERTIES.items():
if self.item.state == state:
style_context.add_class(style_class)
self.data_icon.set_from_icon_name(icon_name, Gtk.IconSize.SMALL_TOOLBAR)
else:
style_context.remove_class(style_class)
if self.item.state == self.item.StateType.ADD:
self.data_header.set_text('')
self.data_title.set_markup('Add run ...')
self.data_subtitle.set_text('')
self.data_duration.set_text('')
self.data_duration_box.set_visible(False)
else:
self.data_header.set_text(self.item.header)
self.data_title.set_markup(f'<small><b>{self.item.title}</b></small>')
self.data_subtitle.set_markup(f'<small>{self.item.subtitle}</small>')
dur = timedelta(seconds=self.item.duration)
self.data_duration.set_markup(f'<small><tt>{dur}</tt></small>')
self.data_duration_box.set_visible(True)
| [
"gi.repository.Gtk.ListBoxRow",
"mxdc.utils.gui.Validator.Int",
"mxdc.utils.datatools.calc_num_frames",
"mxdc.utils.gui.Validator.Bool",
"numpy.array",
"mxdc.Registry.get_utility",
"mxdc.utils.converter.dist_to_resol",
"mxdc.utils.datatools.count_frames",
"mxdc.utils.gui.Validator.Slug",
"gi.repository.Gtk.Adjustment",
"datetime.timedelta",
"gi.repository.Gtk.ListStore",
"mxdc.Property",
"gi.repository.Gtk.SizeGroup",
"mxdc.utils.converter.resol_to_dist",
"mxdc.utils.gui.Validator.Float",
"mxdc.utils.datatools.Strategy.items",
"uuid.uuid4",
"gi.repository.Gio.Application.get_default",
"time.time",
"mxdc.utils.glibref.capi.to_object",
"gi.repository.Gdk.RGBA",
"mxdc.utils.gui.Validator.AngleFrac",
"mxdc.utils.datatools.Strategy.get",
"gi.repository.Gtk.CellRendererText"
] | [((813, 836), 'numpy.array', 'numpy.array', (['start_angs'], {}), '(start_angs)\n', (824, 836), False, 'import numpy\n'), ((1639, 1682), 'mxdc.Property', 'Property', ([], {'type': 'int', 'default': 'StateType.DRAFT'}), '(type=int, default=StateType.DRAFT)\n', (1647, 1682), False, 'from mxdc import Registry, IBeamline, Property, Object\n'), ((1698, 1727), 'mxdc.Property', 'Property', ([], {'type': 'int', 'default': '(0)'}), '(type=int, default=0)\n', (1706, 1727), False, 'from mxdc import Registry, IBeamline, Property, Object\n'), ((1739, 1768), 'mxdc.Property', 'Property', ([], {'type': 'int', 'default': '(0)'}), '(type=int, default=0)\n', (1747, 1768), False, 'from mxdc import Registry, IBeamline, Property, Object\n'), ((1780, 1801), 'mxdc.Property', 'Property', ([], {'type': 'object'}), '(type=object)\n', (1788, 1801), False, 'from mxdc import Registry, IBeamline, Property, Object\n'), ((1813, 1843), 'mxdc.Property', 'Property', ([], {'type': 'str', 'default': '""""""'}), "(type=str, default='')\n", (1821, 1843), False, 'from mxdc import Registry, IBeamline, Property, Object\n'), ((1859, 1892), 'mxdc.Property', 'Property', ([], {'type': 'float', 'default': '(0.0)'}), '(type=float, default=0.0)\n', (1867, 1892), False, 'from mxdc import Registry, IBeamline, Property, Object\n'), ((1907, 1937), 'mxdc.Property', 'Property', ([], {'type': 'str', 'default': '""""""'}), "(type=str, default='')\n", (1915, 1937), False, 'from mxdc import Registry, IBeamline, Property, Object\n'), ((1951, 1981), 'mxdc.Property', 'Property', ([], {'type': 'str', 'default': '""""""'}), "(type=str, default='')\n", (1959, 1981), False, 'from mxdc import Registry, IBeamline, Property, Object\n'), ((1994, 2035), 'mxdc.Property', 'Property', ([], {'type': 'str', 'default': '"""Add run ..."""'}), "(type=str, default='Add run ...')\n", (2002, 2035), False, 'from mxdc import Registry, IBeamline, Property, Object\n'), ((2051, 2080), 'mxdc.Property', 'Property', ([], {'type': 'int', 'default': '(0)'}), '(type=int, default=0)\n', (2059, 2080), False, 'from mxdc import Registry, IBeamline, Property, Object\n'), ((2096, 2126), 'mxdc.Property', 'Property', ([], {'type': 'str', 'default': '""""""'}), "(type=str, default='')\n", (2104, 2126), False, 'from mxdc import Registry, IBeamline, Property, Object\n'), ((2141, 2174), 'mxdc.Property', 'Property', ([], {'type': 'float', 'default': '(0.0)'}), '(type=float, default=0.0)\n', (2149, 2174), False, 'from mxdc import Registry, IBeamline, Property, Object\n'), ((18024, 18065), 'gi.repository.Gtk.SizeGroup', 'Gtk.SizeGroup', (['Gtk.SizeGroupMode.VERTICAL'], {}), '(Gtk.SizeGroupMode.VERTICAL)\n', (18037, 18065), False, 'from gi.repository import Gtk, Gdk, Gio\n'), ((4552, 4587), 'gi.repository.Gdk.RGBA', 'Gdk.RGBA', (['*STATE_COLORS[self.state]'], {}), '(*STATE_COLORS[self.state])\n', (4560, 4587), False, 'from gi.repository import Gtk, Gdk, Gio\n'), ((5808, 5839), 'mxdc.Registry.get_utility', 'Registry.get_utility', (['IBeamline'], {}), '(IBeamline)\n', (5828, 5839), False, 'from mxdc import Registry, IBeamline, Property, Object\n'), ((10917, 10948), 'mxdc.Registry.get_utility', 'Registry.get_utility', (['IBeamline'], {}), '(IBeamline)\n', (10937, 10948), False, 'from mxdc import Registry, IBeamline, Property, Object\n'), ((10971, 11002), 'gi.repository.Gtk.ListStore', 'Gtk.ListStore', (['int', 'str', 'object'], {}), '(int, str, object)\n', (10984, 11002), False, 'from gi.repository import Gtk, Gdk, Gio\n'), ((11901, 11929), 'mxdc.utils.datatools.count_frames', 
'datatools.count_frames', (['info'], {}), '(info)\n', (11923, 11929), False, 'from mxdc.utils import gui, converter, datatools, glibref\n'), ((11957, 12052), 'mxdc.utils.converter.resol_to_dist', 'converter.resol_to_dist', (["info['resolution']", 'self.beamline.detector.mm_size', "info['energy']"], {}), "(info['resolution'], self.beamline.detector.mm_size,\n info['energy'])\n", (11980, 12052), False, 'from mxdc.utils import gui, converter, datatools, glibref\n'), ((12068, 12188), 'mxdc.utils.converter.dist_to_resol', 'converter.dist_to_resol', (["self.beamline.config['distance_limits'][0]", 'self.beamline.detector.mm_size', "info['energy']"], {}), "(self.beamline.config['distance_limits'][0], self.\n beamline.detector.mm_size, info['energy'])\n", (12091, 12188), False, 'from mxdc.utils import gui, converter, datatools, glibref\n'), ((12224, 12344), 'mxdc.utils.converter.dist_to_resol', 'converter.dist_to_resol', (["self.beamline.config['distance_limits'][1]", 'self.beamline.detector.mm_size', "info['energy']"], {}), "(self.beamline.config['distance_limits'][1], self.\n beamline.detector.mm_size, info['energy'])\n", (12247, 12344), False, 'from mxdc.utils import gui, converter, datatools, glibref\n'), ((14543, 14572), 'gi.repository.Gio.Application.get_default', 'Gio.Application.get_default', ([], {}), '()\n', (14570, 14572), False, 'from gi.repository import Gtk, Gdk, Gio\n'), ((15859, 15894), 'gi.repository.Gtk.Adjustment', 'Gtk.Adjustment', (['(10)', '(1)', '(100)', '(1)', '(5)', '(0)'], {}), '(10, 1, 100, 1, 5, 0)\n', (15873, 15894), False, 'from gi.repository import Gtk, Gdk, Gio\n'), ((18107, 18123), 'gi.repository.Gtk.ListBoxRow', 'Gtk.ListBoxRow', ([], {}), '()\n', (18121, 18123), False, 'from gi.repository import Gtk, Gdk, Gio\n'), ((2394, 2405), 'time.time', 'time.time', ([], {}), '()\n', (2403, 2405), False, 'import time\n'), ((2634, 2673), 'mxdc.utils.datatools.count_frames', 'datatools.count_frames', (['self.props.info'], {}), '(self.props.info)\n', (2656, 2673), False, 'from mxdc.utils import gui, converter, datatools, glibref\n'), ((3978, 4011), 'mxdc.utils.glibref.capi.to_object', 'glibref.capi.to_object', (['a_pointer'], {}), '(a_pointer)\n', (4000, 4011), False, 'from mxdc.utils import gui, converter, datatools, glibref\n'), ((4028, 4061), 'mxdc.utils.glibref.capi.to_object', 'glibref.capi.to_object', (['b_pointer'], {}), '(b_pointer)\n', (4050, 4061), False, 'from mxdc.utils import gui, converter, datatools, glibref\n'), ((5897, 5996), 'mxdc.utils.gui.Validator.Float', 'Validator.Float', (["*self.beamline.config['energy_range']", "self.beamline.config['default_energy']"], {}), "(*self.beamline.config['energy_range'], self.beamline.config\n ['default_energy'])\n", (5912, 5996), False, 'from mxdc.utils.gui import Validator\n'), ((6061, 6165), 'mxdc.utils.gui.Validator.Float', 'Validator.Float', (["*self.beamline.config['distance_limits']", "self.beamline.config['default_distance']"], {}), "(*self.beamline.config['distance_limits'], self.beamline.\n config['default_distance'])\n", (6076, 6165), False, 'from mxdc.utils.gui import Validator\n'), ((7557, 7632), 'mxdc.utils.converter.resol_to_dist', 'converter.resol_to_dist', (['resolution', 'self.beamline.detector.mm_size', 'energy'], {}), '(resolution, self.beamline.detector.mm_size, energy)\n', (7580, 7632), False, 'from mxdc.utils import gui, converter, datatools, glibref\n'), ((7787, 7809), 'mxdc.utils.datatools.Strategy.get', 'Strategy.get', (['strategy'], {}), '(strategy)\n', (7799, 7809), False, 'from 
mxdc.utils.datatools import StrategyType, Strategy, ScreeningAngles, ScreeningRange\n'), ((9119, 9180), 'mxdc.utils.datatools.calc_num_frames', 'datatools.calc_num_frames', (['strategy', 'delta', 'range_'], {'skip': 'skip'}), '(strategy, delta, range_, skip=skip)\n', (9144, 9180), False, 'from mxdc.utils import gui, converter, datatools, glibref\n'), ((9455, 9484), 'mxdc.utils.gui.Validator.Float', 'Validator.Float', (['(0.5)', '(50)', '(2.0)'], {}), '(0.5, 50, 2.0)\n', (9470, 9484), False, 'from mxdc.utils.gui import Validator\n'), ((9538, 9574), 'mxdc.utils.gui.Validator.AngleFrac', 'Validator.AngleFrac', (['(0.001)', '(720)', '(1.0)'], {}), '(0.001, 720, 1.0)\n', (9557, 9574), False, 'from mxdc.utils.gui import Validator\n'), ((9627, 9660), 'mxdc.utils.gui.Validator.Float', 'Validator.Float', (['(0.05)', '(10000)', '(1.0)'], {}), '(0.05, 10000, 1.0)\n', (9642, 9660), False, 'from mxdc.utils.gui import Validator\n'), ((9713, 9748), 'mxdc.utils.gui.Validator.Float', 'Validator.Float', (['(-360.0)', '(360.0)', '(0.0)'], {}), '(-360.0, 360.0, 0.0)\n', (9728, 9748), False, 'from mxdc.utils.gui import Validator\n'), ((9799, 9834), 'mxdc.utils.gui.Validator.Float', 'Validator.Float', (['(0.05)', '(720.0)', '(720.0)'], {}), '(0.05, 720.0, 720.0)\n', (9814, 9834), False, 'from mxdc.utils.gui import Validator\n'), ((9887, 9920), 'mxdc.utils.gui.Validator.Float', 'Validator.Float', (['(1.0)', '(25.0)', '(12.66)'], {}), '(1.0, 25.0, 12.66)\n', (9902, 9920), False, 'from mxdc.utils.gui import Validator\n'), ((9977, 10011), 'mxdc.utils.gui.Validator.Float', 'Validator.Float', (['(50.0)', '(1000.0)', '(200)'], {}), '(50.0, 1000.0, 200)\n', (9992, 10011), False, 'from mxdc.utils.gui import Validator\n'), ((10066, 10100), 'mxdc.utils.gui.Validator.Float', 'Validator.Float', (['(0.001)', '(720.0)', '(0.5)'], {}), '(0.001, 720.0, 0.5)\n', (10081, 10100), False, 'from mxdc.utils.gui import Validator\n'), ((10159, 10187), 'mxdc.utils.gui.Validator.Float', 'Validator.Float', (['(0)', '(100)', '(0.0)'], {}), '(0, 100, 0.0)\n', (10174, 10187), False, 'from mxdc.utils.gui import Validator\n'), ((10236, 10262), 'mxdc.utils.gui.Validator.Int', 'Validator.Int', (['(1)', '(10000)', '(1)'], {}), '(1, 10000, 1)\n', (10249, 10262), False, 'from mxdc.utils.gui import Validator\n'), ((10312, 10339), 'mxdc.utils.gui.Validator.Int', 'Validator.Int', (['(1)', '(100000)', '(1)'], {}), '(1, 100000, 1)\n', (10325, 10339), False, 'from mxdc.utils.gui import Validator\n'), ((10387, 10405), 'mxdc.utils.gui.Validator.Slug', 'Validator.Slug', (['(30)'], {}), '(30)\n', (10401, 10405), False, 'from mxdc.utils.gui import Validator\n'), ((10456, 10502), 'mxdc.utils.gui.Validator.Int', 'Validator.Int', (['None', 'None', 'StrategyType.SINGLE'], {}), '(None, None, StrategyType.SINGLE)\n', (10469, 10502), False, 'from mxdc.utils.gui import Validator\n'), ((10553, 10574), 'mxdc.utils.gui.Validator.Bool', 'Validator.Bool', (['(False)'], {}), '(False)\n', (10567, 10574), False, 'from mxdc.utils.gui import Validator\n'), ((10619, 10644), 'mxdc.utils.gui.Validator.Int', 'Validator.Int', (['None', 'None'], {}), '(None, None)\n', (10632, 10644), False, 'from mxdc.utils.gui import Validator\n'), ((10689, 10714), 'mxdc.utils.gui.Validator.Int', 'Validator.Int', (['None', 'None'], {}), '(None, None)\n', (10702, 10714), False, 'from mxdc.utils.gui import Validator\n'), ((10768, 10793), 'mxdc.utils.gui.Validator.Int', 'Validator.Int', (['(1)', '(100)', '(10)'], {}), '(1, 100, 10)\n', (10781, 10793), False, 'from mxdc.utils.gui import Validator\n'), 
((12428, 12474), 'mxdc.utils.gui.Validator.Float', 'Validator.Float', (['min_res', 'max_res'], {'default': '(2.0)'}), '(min_res, max_res, default=2.0)\n', (12443, 12474), False, 'from mxdc.utils.gui import Validator\n'), ((14043, 14059), 'mxdc.utils.datatools.Strategy.items', 'Strategy.items', ([], {}), '()\n', (14057, 14059), False, 'from mxdc.utils.datatools import StrategyType, Strategy, ScreeningAngles, ScreeningRange\n'), ((16418, 16440), 'gi.repository.Gtk.CellRendererText', 'Gtk.CellRendererText', ([], {}), '()\n', (16438, 16440), False, 'from gi.repository import Gtk, Gdk, Gio\n'), ((19597, 19634), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self.item.duration'}), '(seconds=self.item.duration)\n', (19606, 19634), False, 'from datetime import timedelta\n'), ((2452, 2464), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2462, 2464), False, 'import uuid\n'), ((6802, 6914), 'mxdc.utils.converter.dist_to_resol', 'converter.dist_to_resol', (["self.beamline.config['distance_limits'][0]", 'self.beamline.detector.mm_size', 'energy'], {}), "(self.beamline.config['distance_limits'][0], self.\n beamline.detector.mm_size, energy)\n", (6825, 6914), False, 'from mxdc.utils import gui, converter, datatools, glibref\n'), ((6962, 7074), 'mxdc.utils.converter.dist_to_resol', 'converter.dist_to_resol', (["self.beamline.config['distance_limits'][1]", 'self.beamline.detector.mm_size', 'energy'], {}), "(self.beamline.config['distance_limits'][1], self.\n beamline.detector.mm_size, energy)\n", (6985, 7074), False, 'from mxdc.utils import gui, converter, datatools, glibref\n'), ((7169, 7215), 'mxdc.utils.gui.Validator.Float', 'Validator.Float', (['min_res', 'max_res'], {'default': '(2.0)'}), '(min_res, max_res, default=2.0)\n', (7184, 7215), False, 'from mxdc.utils.gui import Validator\n')] |
import torch
from torch import nn
import torch.nn.functional as F
class Residual(nn.Module):
def __init__(self, c, **kw):
super().__init__()
self.res1 = make_conv_bn(c, c, pool=False, **kw)
self.res2 = make_conv_bn(c, c, pool=False, **kw)
def forward(self, x):
return self.res2(self.res1(x)) + x
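# Note (inferred, not stated in the code): the 3-channel input, 10-way output
# and final MaxPool2d(4) suggest 32x32 CIFAR-10-style inputs, in a "fast
# ResNet" layout: prep -> layer1(+residual) -> layer2 -> layer3(+residual) -> pool.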
class FastResNet2(nn.Module):
def __init__(self, weight=1/16):
super().__init__()
self.weight = weight
channels = {'prep': 64, 'layer1': 128, 'layer2': 256, 'layer3': 512}
prep = make_conv_bn(3, channels['prep'], pool=False)
layer1 = make_conv_bn(channels['prep'], channels['layer1'])
layer2 = make_conv_bn(channels['layer1'], channels['layer2'])
layer3 = make_conv_bn(channels['layer2'], channels['layer3'])
pool = nn.MaxPool2d(4)
layer1 = nn.Sequential(layer1, Residual(channels['layer1']))
layer3 = nn.Sequential(layer3, Residual(channels['layer3']))
self.net = nn.Sequential(
prep,
layer1,
layer2,
layer3,
pool
)
self.linear = nn.Linear(channels['layer3'], 10, bias=False)
def forward(self, inputs):
x = self.net(inputs)
x = x.view(x.size(0), x.size(1))
x = self.linear(x)
return self.weight * x
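# `weight` (default 1/16) scales the logits before the loss -- presumably a
# trick to temper the logit scale, given the bias-free final linear layer.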
def make_conv_bn(c_in, c_out, pool=True):
bn = nn.BatchNorm2d(c_out, eps=1e-05, momentum=0.1)
bn.weight.data.fill_(1.0)
bn.bias.data.fill_(0.0)
layers = [
nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1, bias=False),
bn,
nn.ReLU(True),
]
if pool:
layers.append(nn.MaxPool2d(2))
return nn.Sequential(*layers)
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Linear"
] | [((1403, 1449), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['c_out'], {'eps': '(1e-05)', 'momentum': '(0.1)'}), '(c_out, eps=1e-05, momentum=0.1)\n', (1417, 1449), False, 'from torch import nn\n'), ((1709, 1731), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (1722, 1731), False, 'from torch import nn\n'), ((826, 841), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(4)'], {}), '(4)\n', (838, 841), False, 'from torch import nn\n'), ((1001, 1050), 'torch.nn.Sequential', 'nn.Sequential', (['prep', 'layer1', 'layer2', 'layer3', 'pool'], {}), '(prep, layer1, layer2, layer3, pool)\n', (1014, 1050), False, 'from torch import nn\n'), ((1144, 1189), 'torch.nn.Linear', 'nn.Linear', (["channels['layer3']", '(10)'], {'bias': '(False)'}), "(channels['layer3'], 10, bias=False)\n", (1153, 1189), False, 'from torch import nn\n'), ((1532, 1602), 'torch.nn.Conv2d', 'nn.Conv2d', (['c_in', 'c_out'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(c_in, c_out, kernel_size=3, stride=1, padding=1, bias=False)\n', (1541, 1602), False, 'from torch import nn\n'), ((1624, 1637), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1631, 1637), False, 'from torch import nn\n'), ((1680, 1695), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1692, 1695), False, 'from torch import nn\n')] |
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from scipy import stats
class NetworkAnalysis(object):
def __init__(
self, species, species_class, common_name, comadre_reader, asnr_reader
):
self.species = species
self.species_class = species_class
self.common_name = common_name
self.comadre_reader = comadre_reader.species_data
self.asnr_reader = asnr_reader.species_data
self._comadre_report = None
self._asnr_report = None
self._dual_report = None
def create_population_forecast(self, mpm):
growth = mpm.project_species_growth(10)
pops = mpm.study_info.StudyPopulationGroup.values
plt.figure(figsize=(10,5))
for i in range(len(pops)):
population = list(map(lambda x: x[i], growth.values()))
years = range(len(growth))
plt.title("Matrix Population Model Forecast for {}".format(self.species), fontsize=12)
plt.ylabel("Projected Population", fontsize=12)
plt.xlabel("Projected Years", fontsize=12)
plt.plot(years, population, label=pops[i])
plt.legend()
plt.show()
print()
def create_comadre_report(self):
print("Projected population growth for {}".format(self.species))
species_mpm = self.comadre_reader[self.species][0]
self.create_population_forecast(species_mpm)
print("Lifecycle graph for {}".format(self.species))
species_mpm.show_lc_graph()
print()
return
@staticmethod
def print_asnr_metadata(meta_dict):
        # find the longest key so the printed values line up
        maxi = 0
        for e in meta_dict:
            if len(e) > maxi:
                maxi = len(e)
# print dictionary with alignment
for e in meta_dict:
if e not in ["Attributes Available", "Note", "Citation"]:
print(e + " : " + " " * (maxi - len(e)) + meta_dict[e])
def create_asnr_graph(self, graph):
g = graph
weights = list(nx.get_edge_attributes(g, "weight").values())
centralities = np.array(list(nx.degree_centrality(g).values()))
nodes = g.nodes()
pos = nx.spring_layout(g)
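        # Edges are colored by the absolute z-score of their weights and nodes
        # by degree centrality; abs(zscore * 1000) just stretches the values
        # into a usable color/size range.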
plt.figure(figsize=(15, 10))
ec = nx.draw_networkx_edges(
g, pos, edge_color=abs(stats.zscore(weights) * 1000), alpha=0.6, edge_cmap=plt.cm.Reds, width=5
)
nc = nx.draw_networkx_nodes(
g,
pos,
nodelist=nodes,
node_color=centralities,
node_size=abs(stats.zscore(centralities) * 1000),
cmap=plt.cm.Blues,
)
cbar = plt.colorbar(nc, fraction=0.025, pad=0.04)
cbar.set_label("Node Degree Centrality", fontsize=18)
cbar2 = plt.colorbar(ec, fraction=0.04, orientation="horizontal", pad=0.04)
cbar2.set_label("Edge Weight", fontsize=18)
plt.title("Social Contact Network: {}".format(self.species))
plt.axis("off")
plt.show()
def create_asnr_report(self):
print(
"Creating ASNR report for {} with the common name {}".format(
self.species, self.common_name
)
)
        print("Finding summary statistics for the graphs found...")
data = self.asnr_reader[self.species_class][self.common_name]
print(
"There are {} NetworkX graphs available for {}".format(
len(data["graphs"]), self.species
)
)
meta_dict = data["metadata"]
self.print_asnr_metadata(meta_dict)
print()
print("Showing graph for the social contact graph")
self.create_asnr_graph(data["graphs"][0].graph)
return
def show_reports(self):
self.create_comadre_report()
self.create_asnr_report()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"networkx.spring_layout",
"matplotlib.pyplot.plot",
"networkx.get_edge_attributes",
"networkx.degree_centrality",
"matplotlib.pyplot.figure",
"scipy.stats.zscore",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((722, 749), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (732, 749), True, 'import matplotlib.pyplot as plt\n'), ((1168, 1180), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1178, 1180), True, 'import matplotlib.pyplot as plt\n'), ((1189, 1199), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1197, 1199), True, 'import matplotlib.pyplot as plt\n'), ((2275, 2294), 'networkx.spring_layout', 'nx.spring_layout', (['g'], {}), '(g)\n', (2291, 2294), True, 'import networkx as nx\n'), ((2303, 2331), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (2313, 2331), True, 'import matplotlib.pyplot as plt\n'), ((2740, 2782), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['nc'], {'fraction': '(0.025)', 'pad': '(0.04)'}), '(nc, fraction=0.025, pad=0.04)\n', (2752, 2782), True, 'import matplotlib.pyplot as plt\n'), ((2861, 2928), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['ec'], {'fraction': '(0.04)', 'orientation': '"""horizontal"""', 'pad': '(0.04)'}), "(ec, fraction=0.04, orientation='horizontal', pad=0.04)\n", (2873, 2928), True, 'import matplotlib.pyplot as plt\n'), ((3058, 3073), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3066, 3073), True, 'import matplotlib.pyplot as plt\n'), ((3082, 3092), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3090, 3092), True, 'import matplotlib.pyplot as plt\n'), ((1002, 1049), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Projected Population"""'], {'fontsize': '(12)'}), "('Projected Population', fontsize=12)\n", (1012, 1049), True, 'import matplotlib.pyplot as plt\n'), ((1062, 1104), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Projected Years"""'], {'fontsize': '(12)'}), "('Projected Years', fontsize=12)\n", (1072, 1104), True, 'import matplotlib.pyplot as plt\n'), ((1117, 1159), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'population'], {'label': 'pops[i]'}), '(years, population, label=pops[i])\n', (1125, 1159), True, 'import matplotlib.pyplot as plt\n'), ((2116, 2151), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['g', '"""weight"""'], {}), "(g, 'weight')\n", (2138, 2151), True, 'import networkx as nx\n'), ((2199, 2222), 'networkx.degree_centrality', 'nx.degree_centrality', (['g'], {}), '(g)\n', (2219, 2222), True, 'import networkx as nx\n'), ((2404, 2425), 'scipy.stats.zscore', 'stats.zscore', (['weights'], {}), '(weights)\n', (2416, 2425), False, 'from scipy import stats\n'), ((2647, 2673), 'scipy.stats.zscore', 'stats.zscore', (['centralities'], {}), '(centralities)\n', (2659, 2673), False, 'from scipy import stats\n')] |
import unittest
import pandas as pd
from sklearn.datasets import make_moons
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection._split import train_test_split
from skater.core.explanations import Interpretation
from skater.model import InMemoryModel
from skater.util.logger import _INFO
class TestTreeSurrogates(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Classification use-case
cls.X_c, cls.y_c = make_moons(1000, noise=0.5)
cls.X_c = pd.DataFrame(cls.X_c, columns=['F1', 'F2'])
cls.target_names = ['class 0', 'class 1']
cls.X_train_c, cls.X_test_c, cls.y_train_c, cls.y_test_c = train_test_split(cls.X_c, cls.y_c)
cls.classifier_est = DecisionTreeClassifier(max_depth=5, random_state=5)
cls.classifier_est.fit(cls.X_train_c, cls.y_train_c)
cls.interpreter = Interpretation(cls.X_train_c, feature_names=cls.X_c.columns)
cls.model_inst = InMemoryModel(cls.classifier_est.predict, examples=cls.X_train_c,
model_type='classifier', unique_values=[0, 1], feature_names=cls.X_c.columns,
target_names=cls.target_names, log_level=_INFO)
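        # Hard predictions (predict) need unique_values spelled out; the
        # probability-based variant below passes predict_proba instead.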
# all the below tests are with F1-score
def test_surrogate_no_pruning(self):
surrogate_explainer = self.interpreter.tree_surrogate(oracle=self.model_inst, seed=5)
result = surrogate_explainer.fit(self.X_train_c, self.y_train_c, use_oracle=True,
prune=None, scorer_type='default')
        self.assertTrue(result < 0)
def test_surrogate_with_prepruning(self):
surrogate_explainer = self.interpreter.tree_surrogate(oracle=self.model_inst, seed=5)
result = surrogate_explainer.fit(self.X_train_c, self.y_train_c, use_oracle=True,
prune='pre', scorer_type='f1')
        self.assertTrue(result < 0)
def test_surrogate_with_postpruning(self):
surrogate_explainer = self.interpreter.tree_surrogate(oracle=self.model_inst, seed=5)
result = surrogate_explainer.fit(self.X_train_c, self.y_train_c, use_oracle=True,
prune='post', scorer_type='f1')
        self.assertTrue(result < 0)
def test_surrogate_with_cross_entropy(self):
model_inst = InMemoryModel(self.classifier_est.predict_proba, examples=self.X_train_c,
model_type='classifier', feature_names=self.X_c.columns,
target_names=self.target_names, log_level=_INFO, probability=True)
surrogate_explainer = self.interpreter.tree_surrogate(oracle=model_inst, seed=5)
result = surrogate_explainer.fit(self.X_train_c, self.y_train_c, use_oracle=True,
prune='post', scorer_type='default')
        self.assertEqual(surrogate_explainer.scorer_name_, 'cross-entropy')
        self.assertTrue(result != 0)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestTreeSurrogates)
unittest.TextTestRunner(verbosity=2).run(suite)
| [
"skater.model.InMemoryModel",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.model_selection._split.train_test_split",
"sklearn.datasets.make_moons",
"skater.core.explanations.Interpretation",
"pandas.DataFrame",
"unittest.TextTestRunner",
"unittest.TestLoader"
] | [((465, 492), 'sklearn.datasets.make_moons', 'make_moons', (['(1000)'], {'noise': '(0.5)'}), '(1000, noise=0.5)\n', (475, 492), False, 'from sklearn.datasets import make_moons\n'), ((511, 554), 'pandas.DataFrame', 'pd.DataFrame', (['cls.X_c'], {'columns': "['F1', 'F2']"}), "(cls.X_c, columns=['F1', 'F2'])\n", (523, 554), True, 'import pandas as pd\n'), ((672, 706), 'sklearn.model_selection._split.train_test_split', 'train_test_split', (['cls.X_c', 'cls.y_c'], {}), '(cls.X_c, cls.y_c)\n', (688, 706), False, 'from sklearn.model_selection._split import train_test_split\n'), ((736, 787), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': '(5)', 'random_state': '(5)'}), '(max_depth=5, random_state=5)\n', (758, 787), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((875, 935), 'skater.core.explanations.Interpretation', 'Interpretation', (['cls.X_train_c'], {'feature_names': 'cls.X_c.columns'}), '(cls.X_train_c, feature_names=cls.X_c.columns)\n', (889, 935), False, 'from skater.core.explanations import Interpretation\n'), ((961, 1161), 'skater.model.InMemoryModel', 'InMemoryModel', (['cls.classifier_est.predict'], {'examples': 'cls.X_train_c', 'model_type': '"""classifier"""', 'unique_values': '[0, 1]', 'feature_names': 'cls.X_c.columns', 'target_names': 'cls.target_names', 'log_level': '_INFO'}), "(cls.classifier_est.predict, examples=cls.X_train_c,\n model_type='classifier', unique_values=[0, 1], feature_names=cls.X_c.\n columns, target_names=cls.target_names, log_level=_INFO)\n", (974, 1161), False, 'from skater.model import InMemoryModel\n'), ((2391, 2597), 'skater.model.InMemoryModel', 'InMemoryModel', (['self.classifier_est.predict_proba'], {'examples': 'self.X_train_c', 'model_type': '"""classifier"""', 'feature_names': 'self.X_c.columns', 'target_names': 'self.target_names', 'log_level': '_INFO', 'probability': '(True)'}), "(self.classifier_est.predict_proba, examples=self.X_train_c,\n model_type='classifier', feature_names=self.X_c.columns, target_names=\n self.target_names, log_level=_INFO, probability=True)\n", (2404, 2597), False, 'from skater.model import InMemoryModel\n'), ((3084, 3105), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (3103, 3105), False, 'import unittest\n'), ((3152, 3188), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (3175, 3188), False, 'import unittest\n')] |
#!/usr/bin/env python
import torch
from torchvision import datasets, transforms
import helper
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])])
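# Mean 0.5 / std 0.5 maps the [0, 1] tensors to [-1, 1]; single-element lists
# because MNIST images have one channel.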
# Download and load the training data
trainset = datasets.MNIST('MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Download and load the test data
testset = datasets.MNIST('MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
image, label = next(iter(trainloader))
helper.imshow(image[0,:])
| [
"helper.imshow",
"torch.utils.data.DataLoader",
"torchvision.transforms.Normalize",
"torchvision.datasets.MNIST",
"torchvision.transforms.ToTensor"
] | [((312, 389), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['"""MNIST_data/"""'], {'download': '(True)', 'train': '(True)', 'transform': 'transform'}), "('MNIST_data/', download=True, train=True, transform=transform)\n", (326, 389), False, 'from torchvision import datasets, transforms\n'), ((404, 470), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': '(64)', 'shuffle': '(True)'}), '(trainset, batch_size=64, shuffle=True)\n', (431, 470), False, 'import torch\n'), ((516, 594), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['"""MNIST_data/"""'], {'download': '(True)', 'train': '(False)', 'transform': 'transform'}), "('MNIST_data/', download=True, train=False, transform=transform)\n", (530, 594), False, 'from torchvision import datasets, transforms\n'), ((608, 673), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': '(64)', 'shuffle': '(True)'}), '(testset, batch_size=64, shuffle=True)\n', (635, 673), False, 'import torch\n'), ((715, 741), 'helper.imshow', 'helper.imshow', (['image[0, :]'], {}), '(image[0, :])\n', (728, 741), False, 'import helper\n'), ((171, 192), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (190, 192), False, 'from torchvision import datasets, transforms\n'), ((226, 260), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.5]', '[0.5]'], {}), '([0.5], [0.5])\n', (246, 260), False, 'from torchvision import datasets, transforms\n')] |
import Foundation
from PyObjCTools.TestSupport import TestCase
class TestSubclassing(TestCase):
def testBasicSubclassing(self):
class NSObjectSubclass(Foundation.NSObject):
def someRandomMethod(self):
return 42
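        # Defining the Python subclass is enough to register it with the
        # Objective-C runtime, which is why NSClassFromString can find it.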
subclassClass = Foundation.NSClassFromString("NSObjectSubclass")
self.assertIsNot(subclassClass, None, "Failed to subclass NSObject.")
subclassInstance = subclassClass.new()
self.assertIsInstance(subclassInstance, subclassClass)
self.assertIsInstance(subclassInstance, Foundation.NSObject)
self.assertNotIsInstance(subclassInstance, Foundation.NSArray)
subclassInstance.description()
self.assertEqual(subclassInstance.someRandomMethod(), 42)
self.assertIs(subclassInstance, subclassInstance, "Identity check failed.")
self.assertIs(
subclassInstance, subclassInstance.self(), "Identity check failed."
)
| [
"Foundation.NSClassFromString"
] | [((278, 326), 'Foundation.NSClassFromString', 'Foundation.NSClassFromString', (['"""NSObjectSubclass"""'], {}), "('NSObjectSubclass')\n", (306, 326), False, 'import Foundation\n')] |
from conans import ConanFile, CMake, tools
class JsonBuilderConan(ConanFile):
    name = "json-builder"
    version = '1.0.0'
    exports_sources = "*"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False]}
default_options = {"shared": False}
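    # `shared` selects whether package() ships the .so or the static .a.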
generators = "cmake"
def requirements(self):
self.requires("json-parser/1.0.0")
def imports(self):
self.copy(".h*", src='include')
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
        if self.options.shared:
self.copy("*.so", dst="lib", keep_path=False)
else:
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs=["jsonbuilder"]
| [
"conans.CMake"
] | [((500, 511), 'conans.CMake', 'CMake', (['self'], {}), '(self)\n', (505, 511), False, 'from conans import ConanFile, CMake, tools\n')] |
'''
First portion of Network
Input: takes 28x28 images from MNIST and EMNIST data
Output: a 12-element vector giving the probability of the image belonging to each of 12 classes <0,1...9, A,M>
'''
from __future__ import print_function
import keras
import numpy as np
import os
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from keras.models import load_model
import ipdb
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
batch_size = 128
num_classes = 12
epochs = 10
# input image dimensions
img_rows, img_cols = 28,28
def load_data(path, val_perc = 0.1):
data_txt = np.genfromtxt(os.path.join(path, 'all_ocv.ocv'), dtype=np.int32, delimiter=" ", skip_header=1)
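    # .ocv layout (inferred from the slicing below): column 0 = class label,
    # column 1 = unused here, columns 2.. = flattened 28x28 pixel values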
nSamples = data_txt.shape[0]
np.random.shuffle(data_txt)
x = data_txt[:,2:].reshape(-1,img_rows,img_cols)
y = data_txt[:,0]
valLim = int(val_perc*nSamples)
x_train = x[valLim:]
y_train = y[valLim:]
x_test = x[:valLim]
y_test = y[:valLim]
return (x_train,y_train), (x_test,y_test)
def load_test_prediction():
data_txt = np.genfromtxt('test.ocv', dtype=np.int32, delimiter=" ", skip_header=1)
nSamples = data_txt.shape[0]
x = data_txt[:,2:].reshape(-1,img_rows,img_cols)
return x
def WriteTestLabels(predicted_y, mapping_81, file_name):
total_size = predicted_y.size
print("Total images test data: ", str(total_size))
data_labels = []
for i in range(total_size):
print(predicted_y[i])
print(mapping_81[int(predicted_y[i])])
data_labels.append(mapping_81[int(predicted_y[i])])
with open(file_name, "w") as f:
f.write("Id,Label")
        for i in range(total_size):
f.write("\n")
f.write("{0},{1}".format(str(i+1), str(int(data_labels[i]))))
print("Done writing labels in Test File")
def AddRandomNoise(images):
    # adds random pixel values in [0, 255)
    # noise applied to 15-25% of each image's pixels
num_images = images.shape[0]
size_im = images.shape[1]
images = images.reshape(num_images, size_im*size_im)
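    # flattened so a single index addresses an individual pixel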
for im_index in range(num_images):
im_size = images[im_index].size
max_noise = int(im_size * 0.25)
min_noise = int(im_size * 0.15)
num_elements_noise_added = np.random.randint(min_noise, max_noise)
indexed_noisy_image=np.random.choice(im_size, num_elements_noise_added, replace=False)
for i in indexed_noisy_image:
images[im_index,i] = np.random.randint(0,255)
images = images.reshape(num_images, size_im, size_im)
return images
def PlotHistory(history):
#history of accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
    plt.savefig("./accuracy.png")
plt.close()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
    plt.savefig("./loss.png")
plt.close()
path = '/home/ml/ajain25/Documents/Courses/AML/Project_3/Keras/MNSIT_Data'
curr_path ='/home/ml/ajain25/Documents/Courses/AML/Project_3/Keras/MNSIT_Data/MNIST_rotated'
if os.path.isfile(os.path.join(curr_path,'x_train.npy')):
x_train = np.load(os.path.join(curr_path, 'x_train.npy'))
y_train = np.load(os.path.join(curr_path, 'y_train.npy'))
x_test = np.load(os.path.join(curr_path, 'x_test.npy'))
y_test = np.load(os.path.join(curr_path, 'y_test.npy'))
else:
(x_train, y_train), (x_test, y_test) = load_data(path)
np.save(os.path.join(curr_path,'x_train'),x_train)
np.save(os.path.join(curr_path,'y_train'),y_train)
np.save(os.path.join(curr_path,'x_test'),x_test)
np.save(os.path.join(curr_path,'y_test'),y_test)
# Add noise to all training images
x_train = AddRandomNoise(x_train)
# Add noise to the first 20% of test images
num_images_to_add_noise = int(x_test.shape[0] * 0.20)
noisy_images = x_test[:num_images_to_add_noise]
x_test[:num_images_to_add_noise] = AddRandomNoise(noisy_images)
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Model for first portion of network
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
model.save('CNN_FirstPortion_segregated_image.h5')
PlotHistory(history)
x_predict = load_test_prediction().astype('float32')
if K.image_data_format() == 'channels_first':
x_predict = x_predict.reshape(x_predict.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_predict = x_predict.reshape(x_predict.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_predict /= 255
labels = model.predict(x_predict, batch_size=128)
predicted_labels = np.argmax(labels, axis=1)
WriteTestLabels(predicted_labels, mapping_81, "./TestPredicted.csv")
| [
"keras.layers.Conv2D",
"matplotlib.pyplot.ylabel",
"keras.utils.to_categorical",
"keras.layers.Dense",
"numpy.genfromtxt",
"keras.optimizers.Adadelta",
"keras.backend.image_data_format",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.savefig",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"matplotlib.use",
"numpy.random.choice",
"numpy.argmax",
"keras.models.Sequential",
"matplotlib.pyplot.title",
"keras.layers.Dropout",
"matplotlib.pyplot.legend",
"os.path.join",
"numpy.random.randint",
"numpy.random.shuffle"
] | [((497, 518), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (511, 518), False, 'import matplotlib\n'), ((4860, 4908), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (4886, 4908), False, 'import keras\n'), ((4918, 4965), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (4944, 4965), False, 'import keras\n'), ((5013, 5025), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (5023, 5025), False, 'from keras.models import Sequential\n'), ((6513, 6538), 'numpy.argmax', 'np.argmax', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (6522, 6538), True, 'import numpy as np\n'), ((830, 857), 'numpy.random.shuffle', 'np.random.shuffle', (['data_txt'], {}), '(data_txt)\n', (847, 857), True, 'import numpy as np\n'), ((1140, 1211), 'numpy.genfromtxt', 'np.genfromtxt', (['"""test.ocv"""'], {'dtype': 'np.int32', 'delimiter': '""" """', 'skip_header': '(1)'}), "('test.ocv', dtype=np.int32, delimiter=' ', skip_header=1)\n", (1153, 1211), True, 'import numpy as np\n'), ((2576, 2608), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['acc']"], {}), "(history.history['acc'])\n", (2584, 2608), True, 'import matplotlib.pyplot as plt\n'), ((2611, 2647), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_acc']"], {}), "(history.history['val_acc'])\n", (2619, 2647), True, 'import matplotlib.pyplot as plt\n'), ((2650, 2677), 'matplotlib.pyplot.title', 'plt.title', (['"""model accuracy"""'], {}), "('model accuracy')\n", (2659, 2677), True, 'import matplotlib.pyplot as plt\n'), ((2680, 2702), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (2690, 2702), True, 'import matplotlib.pyplot as plt\n'), ((2705, 2724), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (2715, 2724), True, 'import matplotlib.pyplot as plt\n'), ((2727, 2773), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'val']"], {'loc': '"""upper left"""'}), "(['train', 'val'], loc='upper left')\n", (2737, 2773), True, 'import matplotlib.pyplot as plt\n'), ((2776, 2805), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./accuracy.png"""'], {}), "('./accuracy.png')\n", (2787, 2805), True, 'import matplotlib.pyplot as plt\n'), ((2809, 2820), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2818, 2820), True, 'import matplotlib.pyplot as plt\n'), ((2857, 2890), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (2865, 2890), True, 'import matplotlib.pyplot as plt\n'), ((2893, 2930), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (2901, 2930), True, 'import matplotlib.pyplot as plt\n'), ((2933, 2956), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (2942, 2956), True, 'import matplotlib.pyplot as plt\n'), ((2959, 2977), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (2969, 2977), True, 'import matplotlib.pyplot as plt\n'), ((2980, 2999), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (2990, 2999), True, 'import matplotlib.pyplot as plt\n'), ((3002, 3048), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'val']"], {'loc': '"""upper left"""'}), "(['train', 'val'], loc='upper left')\n", (3012, 3048), True, 'import matplotlib.pyplot as plt\n'), ((3051, 
3076), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./loss.png"""'], {}), "('./loss.png')\n", (3062, 3076), True, 'import matplotlib.pyplot as plt\n'), ((3080, 3091), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3089, 3091), True, 'import matplotlib.pyplot as plt\n'), ((3281, 3319), 'os.path.join', 'os.path.join', (['curr_path', '"""x_train.npy"""'], {}), "(curr_path, 'x_train.npy')\n", (3293, 3319), False, 'import os\n'), ((4170, 4191), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (4189, 4191), True, 'from keras import backend as K\n'), ((5036, 5110), 'keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': 'input_shape'}), "(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape)\n", (5042, 5110), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((5156, 5193), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (5162, 5193), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((5205, 5235), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5217, 5235), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((5248, 5285), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (5254, 5285), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((5297, 5334), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (5303, 5334), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((5346, 5376), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5358, 5376), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((5389, 5398), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5396, 5398), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5410, 5439), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (5415, 5439), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5451, 5464), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (5458, 5464), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5476, 5505), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (5481, 5505), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5517, 5529), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (5524, 5529), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5541, 5581), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (5546, 5581), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((6138, 6159), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (6157, 6159), True, 'from keras import backend as K\n'), ((716, 749), 'os.path.join', 'os.path.join', (['path', '"""all_ocv.ocv"""'], {}), "(path, 'all_ocv.ocv')\n", (728, 749), False, 'import os\n'), ((2232, 2271), 'numpy.random.randint', 'np.random.randint', (['min_noise', 'max_noise'], {}), '(min_noise, max_noise)\n', (2249, 2271), True, 'import numpy as np\n'), ((2296, 2362), 'numpy.random.choice', 'np.random.choice', (['im_size', 'num_elements_noise_added'], {'replace': '(False)'}), '(im_size, 
num_elements_noise_added, replace=False)\n', (2312, 2362), True, 'import numpy as np\n'), ((3342, 3380), 'os.path.join', 'os.path.join', (['curr_path', '"""x_train.npy"""'], {}), "(curr_path, 'x_train.npy')\n", (3354, 3380), False, 'import os\n'), ((3402, 3440), 'os.path.join', 'os.path.join', (['curr_path', '"""y_train.npy"""'], {}), "(curr_path, 'y_train.npy')\n", (3414, 3440), False, 'import os\n'), ((3461, 3498), 'os.path.join', 'os.path.join', (['curr_path', '"""x_test.npy"""'], {}), "(curr_path, 'x_test.npy')\n", (3473, 3498), False, 'import os\n'), ((3519, 3556), 'os.path.join', 'os.path.join', (['curr_path', '"""y_test.npy"""'], {}), "(curr_path, 'y_test.npy')\n", (3531, 3556), False, 'import os\n'), ((3631, 3665), 'os.path.join', 'os.path.join', (['curr_path', '"""x_train"""'], {}), "(curr_path, 'x_train')\n", (3643, 3665), False, 'import os\n'), ((3684, 3718), 'os.path.join', 'os.path.join', (['curr_path', '"""y_train"""'], {}), "(curr_path, 'y_train')\n", (3696, 3718), False, 'import os\n'), ((3737, 3770), 'os.path.join', 'os.path.join', (['curr_path', '"""x_test"""'], {}), "(curr_path, 'x_test')\n", (3749, 3770), False, 'import os\n'), ((3788, 3821), 'os.path.join', 'os.path.join', (['curr_path', '"""y_test"""'], {}), "(curr_path, 'y_test')\n", (3800, 3821), False, 'import os\n'), ((5666, 5693), 'keras.optimizers.Adadelta', 'keras.optimizers.Adadelta', ([], {}), '()\n', (5691, 5693), False, 'import keras\n'), ((2425, 2450), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (2442, 2450), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import io
import tokenize
from pathlib import Path
from typing import Iterable
from libcst.testing.utils import UnitTest
from fixit.common.base import LintConfig
from fixit.common.pseudo_rule import PseudoContext, PseudoLintRule
from fixit.common.report import BaseLintRuleReport
from fixit.rule_lint_engine import lint_file
DUMMY_FILE_PATH = Path(__file__)
DUMMY_SOURCE = b"pass\npass\npass\n"
DUMMY_LINT_CODE = "DummyLintRule"
DUMMY_LINT_MESSAGE = "dummy lint message"
class PseudoContextTest(UnitTest):
def setUp(self) -> None:
self.dummy_tokens = tuple(tokenize.tokenize(io.BytesIO(DUMMY_SOURCE).readline))
self.dummy_ast_tree = ast.parse(DUMMY_SOURCE)
def test_tokens(self) -> None:
full_context = PseudoContext(
file_path=DUMMY_FILE_PATH, source=DUMMY_SOURCE, tokens=self.dummy_tokens
)
self.assertIs(full_context.tokens, self.dummy_tokens)
partial_context = PseudoContext(file_path=DUMMY_FILE_PATH, source=DUMMY_SOURCE)
self.assertEqual(partial_context.tokens, self.dummy_tokens)
self.assertIsNot(partial_context.tokens, self.dummy_tokens)
def test_ast_tree(self) -> None:
full_context = PseudoContext(
file_path=DUMMY_FILE_PATH, source=DUMMY_SOURCE, ast_tree=self.dummy_ast_tree
)
self.assertIs(full_context.ast_tree, self.dummy_ast_tree)
partial_context = PseudoContext(file_path=DUMMY_FILE_PATH, source=DUMMY_SOURCE)
        # ast_tree is rebuilt lazily from source: equivalent content, but a distinct object
self.assertIsNot(partial_context.ast_tree, self.dummy_ast_tree)
class PseudoLintRuleTest(UnitTest):
def test_pseudo_lint_rule(self) -> None:
class DummyLintRuleReport(BaseLintRuleReport):
pass
dummy_report = DummyLintRuleReport(
file_path=DUMMY_FILE_PATH,
code=DUMMY_LINT_CODE,
message=DUMMY_LINT_MESSAGE,
line=1,
column=0,
)
class DummyPseudoLintRule(PseudoLintRule):
def lint_file(self) -> Iterable[BaseLintRuleReport]:
return [dummy_report]
reports = lint_file(
DUMMY_FILE_PATH,
DUMMY_SOURCE,
config=LintConfig(),
rules={DummyPseudoLintRule},
)
self.assertEqual(reports, [dummy_report])
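# Behaviour sketch (inferred from the assertions above, not part of the test
# suite): PseudoContext derives `tokens` and `ast_tree` lazily from `source`
# when they are not passed in, so e.g.:
#   ctx = PseudoContext(file_path=DUMMY_FILE_PATH, source=DUMMY_SOURCE)
#   ctx.tokens == tuple(tokenize.tokenize(io.BytesIO(DUMMY_SOURCE).readline))  # True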
| [
"pathlib.Path",
"io.BytesIO",
"fixit.common.pseudo_rule.PseudoContext",
"ast.parse",
"fixit.common.base.LintConfig"
] | [((536, 550), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (540, 550), False, 'from pathlib import Path\n'), ((848, 871), 'ast.parse', 'ast.parse', (['DUMMY_SOURCE'], {}), '(DUMMY_SOURCE)\n', (857, 871), False, 'import ast\n'), ((931, 1023), 'fixit.common.pseudo_rule.PseudoContext', 'PseudoContext', ([], {'file_path': 'DUMMY_FILE_PATH', 'source': 'DUMMY_SOURCE', 'tokens': 'self.dummy_tokens'}), '(file_path=DUMMY_FILE_PATH, source=DUMMY_SOURCE, tokens=self.\n dummy_tokens)\n', (944, 1023), False, 'from fixit.common.pseudo_rule import PseudoContext, PseudoLintRule\n'), ((1129, 1190), 'fixit.common.pseudo_rule.PseudoContext', 'PseudoContext', ([], {'file_path': 'DUMMY_FILE_PATH', 'source': 'DUMMY_SOURCE'}), '(file_path=DUMMY_FILE_PATH, source=DUMMY_SOURCE)\n', (1142, 1190), False, 'from fixit.common.pseudo_rule import PseudoContext, PseudoLintRule\n'), ((1388, 1484), 'fixit.common.pseudo_rule.PseudoContext', 'PseudoContext', ([], {'file_path': 'DUMMY_FILE_PATH', 'source': 'DUMMY_SOURCE', 'ast_tree': 'self.dummy_ast_tree'}), '(file_path=DUMMY_FILE_PATH, source=DUMMY_SOURCE, ast_tree=self\n .dummy_ast_tree)\n', (1401, 1484), False, 'from fixit.common.pseudo_rule import PseudoContext, PseudoLintRule\n'), ((1594, 1655), 'fixit.common.pseudo_rule.PseudoContext', 'PseudoContext', ([], {'file_path': 'DUMMY_FILE_PATH', 'source': 'DUMMY_SOURCE'}), '(file_path=DUMMY_FILE_PATH, source=DUMMY_SOURCE)\n', (1607, 1655), False, 'from fixit.common.pseudo_rule import PseudoContext, PseudoLintRule\n'), ((2431, 2443), 'fixit.common.base.LintConfig', 'LintConfig', ([], {}), '()\n', (2441, 2443), False, 'from fixit.common.base import LintConfig\n'), ((782, 806), 'io.BytesIO', 'io.BytesIO', (['DUMMY_SOURCE'], {}), '(DUMMY_SOURCE)\n', (792, 806), False, 'import io\n')] |
"""Unit-tests for module hed_utils.support.checked_param"""
import logging
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Optional
from unittest import TestCase
from hed_utils.support.checked_param import file_path
from hed_utils.support.checked_param import string_value
class StringValueTest(TestCase):
"""Unit tests for the checked_param.string_value method."""
def test_bad_type(self):
"""Verify error is raised if value is having bad type."""
name = "bad_type"
value = object()
with self.assertRaises(TypeError) as ctx:
string_value(name, value) # noqa
self.assertTupleEqual((name, str, object), ctx.exception.args)
def test_empty_ok_true(self):
"""Verify NO error is raised if value is empty and empty_ok == True."""
param_name = "empty-but-ok"
param_value = ""
self.assertEqual("", string_value(param_name, param_value))
def test_empty_ok_false(self):
"""Verify error is raised if value is empty and empty_ok == False."""
param_name = "empty-but-not-ok"
with self.assertRaises(ValueError) as ctx:
string_value(param_name, "", empty_ok=False)
self.assertTupleEqual((param_name, ""), ctx.exception.args)
with self.assertRaises(ValueError) as ctx:
string_value(
param_name,
" \t \n ",
strip_whitespace=False,
empty_ok=False
)
self.assertTupleEqual((param_name, " \t \n "), ctx.exception.args)
def test_none_ok_true(self):
"""Verify NO error is raised if value is None and none_ok == True."""
param_name = "none-but-ok"
param_value = None
self.assertIsNone(string_value(param_name, param_value, none_ok=True))
def test_none_ok_false(self):
"""Verify error is raised if value is None and none_ok == False."""
param_name = "none-but-not-ok"
param_value = None
with self.assertRaises(TypeError) as ctx:
string_value(param_name, param_value, none_ok=False)
self.assertTupleEqual((param_name, str, type(None)), ctx.exception.args)
def test_strip_whitespace_true(self):
"""Verify value is stripped when strip_whitespace == True."""
param_name = "whitespace-and-stripped"
param_value = " \n \r \t "
result = string_value(param_name, param_value, strip_whitespace=True)
self.assertEqual("", result)
with self.assertRaises(ValueError) as ctx:
string_value(
param_name, param_value, strip_whitespace=True, empty_ok=False
)
self.assertTupleEqual((param_name, param_value), ctx.exception.args)
def test_strip_whitespace_false(self):
"""Verify value is not stripped when strip_whitespace == False."""
param_name = "whitespace-but-not-stripped"
param_value = " \n \r \t "
result = string_value(param_name, param_value, strip_whitespace=False)
self.assertEqual(param_value, result)
def test_strip_quotes_false(self):
"""Verify value is stripped accordingly when strip_quotes == False."""
self.assertEqual(
"\"\n \r \t\"",
string_value(
name="quoted-whitespace-with-spacing",
value=" \"\n \r \t\" ",
strip_quotes=False,
strip_whitespace=True
)
)
self.assertEqual(
" \"\n \r \t\" ",
string_value(
name="quoted-whitespace-with-spacing",
value=" \"\n \r \t\" ",
strip_quotes=False,
strip_whitespace=False
)
)
def test_strip_quotes_true(self):
"""Verify value is stripped accordingly when strip_quotes == True."""
self.assertEqual(
" \"\n \r \t\" ",
string_value(
name="quoted-whitespace-with-spacing",
value=" \"\n \r \t\" ",
strip_quotes=True,
strip_whitespace=False,
empty_ok=True
)
)
self.assertEqual(
"",
string_value(
name="quoted-whitespace-with-spacing",
value=" \"\n \r \t\" ",
strip_quotes=True,
strip_whitespace=True,
empty_ok=True
)
)
with self.assertRaises(ValueError) as ctx:
string_value(
name="quoted-whitespace-with-spacing",
value=" \"\n \r \t\" ",
strip_quotes=True,
strip_whitespace=True,
empty_ok=False
)
self.assertTupleEqual(
("quoted-whitespace-with-spacing", " \"\n \r \t\" "),
ctx.exception.args
)
class FilePathTest(TestCase):
temp_dir: Optional[TemporaryDirectory]
temp_dir_path: Optional[Path]
def setUp(self) -> None:
self.temp_dir = TemporaryDirectory(prefix=self.__class__.__name__)
self.temp_dir_path = Path(self.temp_dir.name)
def tearDown(self) -> None:
self.temp_dir_path = None
try:
self.temp_dir.cleanup()
except Exception: # noqa
logging.error("tearDown error!", exc_info=True)
finally:
self.temp_dir = None
def test_bytes_value(self):
"""Verify bytes value can be processed."""
original_path = self.temp_dir_path / "file.txt"
name = "bytes_value"
value = str(original_path).encode("utf-8")
self.assertEqual(
original_path, file_path(name, value, readable=False)
)
with self.assertRaises(FileNotFoundError) as ctx:
file_path(name, value, readable=True)
self.assertTupleEqual((name, value), ctx.exception.args)
def test_str_value(self):
"""Verify str value can be processed."""
original_path = self.temp_dir_path / "file.txt"
name = "str_value"
value = str(original_path)
self.assertEqual(
original_path, file_path(name, value, readable=False)
)
with self.assertRaises(FileNotFoundError) as ctx:
file_path(name, value, readable=True)
self.assertTupleEqual((name, value), ctx.exception.args)
def test_path_value(self):
"""Verify Path value can be processed."""
original_path = self.temp_dir_path / "file.txt"
name = "path_value"
value = original_path
self.assertEqual(
original_path, file_path(name, value, readable=False)
)
with self.assertRaises(FileNotFoundError) as ctx:
file_path(name, value, readable=True)
self.assertTupleEqual((name, value), ctx.exception.args)
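# Usage sketch (illustrative values, mirroring the behaviour implied by the
# tests above: whitespace is stripped before quotes):
#   string_value("greeting", ' "hi" ', strip_whitespace=True, strip_quotes=True)
#   would return 'hi', while file_path("cfg", "missing.txt", readable=True)
#   would raise FileNotFoundError for a non-existent file.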
| [
"tempfile.TemporaryDirectory",
"pathlib.Path",
"hed_utils.support.checked_param.file_path",
"hed_utils.support.checked_param.string_value",
"logging.error"
] | [((2434, 2494), 'hed_utils.support.checked_param.string_value', 'string_value', (['param_name', 'param_value'], {'strip_whitespace': '(True)'}), '(param_name, param_value, strip_whitespace=True)\n', (2446, 2494), False, 'from hed_utils.support.checked_param import string_value\n'), ((3002, 3063), 'hed_utils.support.checked_param.string_value', 'string_value', (['param_name', 'param_value'], {'strip_whitespace': '(False)'}), '(param_name, param_value, strip_whitespace=False)\n', (3014, 3063), False, 'from hed_utils.support.checked_param import string_value\n'), ((5090, 5140), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {'prefix': 'self.__class__.__name__'}), '(prefix=self.__class__.__name__)\n', (5108, 5140), False, 'from tempfile import TemporaryDirectory\n'), ((5170, 5194), 'pathlib.Path', 'Path', (['self.temp_dir.name'], {}), '(self.temp_dir.name)\n', (5174, 5194), False, 'from pathlib import Path\n'), ((619, 644), 'hed_utils.support.checked_param.string_value', 'string_value', (['name', 'value'], {}), '(name, value)\n', (631, 644), False, 'from hed_utils.support.checked_param import string_value\n'), ((930, 967), 'hed_utils.support.checked_param.string_value', 'string_value', (['param_name', 'param_value'], {}), '(param_name, param_value)\n', (942, 967), False, 'from hed_utils.support.checked_param import string_value\n'), ((1187, 1231), 'hed_utils.support.checked_param.string_value', 'string_value', (['param_name', '""""""'], {'empty_ok': '(False)'}), "(param_name, '', empty_ok=False)\n", (1199, 1231), False, 'from hed_utils.support.checked_param import string_value\n'), ((1364, 1439), 'hed_utils.support.checked_param.string_value', 'string_value', (['param_name', '""" \t \n """'], {'strip_whitespace': '(False)', 'empty_ok': '(False)'}), "(param_name, ' \\t \\n ', strip_whitespace=False, empty_ok=False)\n", (1376, 1439), False, 'from hed_utils.support.checked_param import string_value\n'), ((1794, 1845), 'hed_utils.support.checked_param.string_value', 'string_value', (['param_name', 'param_value'], {'none_ok': '(True)'}), '(param_name, param_value, none_ok=True)\n', (1806, 1845), False, 'from hed_utils.support.checked_param import string_value\n'), ((2087, 2139), 'hed_utils.support.checked_param.string_value', 'string_value', (['param_name', 'param_value'], {'none_ok': '(False)'}), '(param_name, param_value, none_ok=False)\n', (2099, 2139), False, 'from hed_utils.support.checked_param import string_value\n'), ((2595, 2671), 'hed_utils.support.checked_param.string_value', 'string_value', (['param_name', 'param_value'], {'strip_whitespace': '(True)', 'empty_ok': '(False)'}), '(param_name, param_value, strip_whitespace=True, empty_ok=False)\n', (2607, 2671), False, 'from hed_utils.support.checked_param import string_value\n'), ((3296, 3416), 'hed_utils.support.checked_param.string_value', 'string_value', ([], {'name': '"""quoted-whitespace-with-spacing"""', 'value': '\' "\\n \\r \\t" \'', 'strip_quotes': '(False)', 'strip_whitespace': '(True)'}), '(name=\'quoted-whitespace-with-spacing\', value=\' "\\n \\r \\t" \',\n strip_quotes=False, strip_whitespace=True)\n', (3308, 3416), False, 'from hed_utils.support.checked_param import string_value\n'), ((3572, 3693), 'hed_utils.support.checked_param.string_value', 'string_value', ([], {'name': '"""quoted-whitespace-with-spacing"""', 'value': '\' "\\n \\r \\t" \'', 'strip_quotes': '(False)', 'strip_whitespace': '(False)'}), '(name=\'quoted-whitespace-with-spacing\', value=\' "\\n \\r \\t" \',\n strip_quotes=False, 
strip_whitespace=False)\n', (3584, 3693), False, 'from hed_utils.support.checked_param import string_value\n'), ((3966, 4101), 'hed_utils.support.checked_param.string_value', 'string_value', ([], {'name': '"""quoted-whitespace-with-spacing"""', 'value': '\' "\\n \\r \\t" \'', 'strip_quotes': '(True)', 'strip_whitespace': '(False)', 'empty_ok': '(True)'}), '(name=\'quoted-whitespace-with-spacing\', value=\' "\\n \\r \\t" \',\n strip_quotes=True, strip_whitespace=False, empty_ok=True)\n', (3978, 4101), False, 'from hed_utils.support.checked_param import string_value\n'), ((4259, 4393), 'hed_utils.support.checked_param.string_value', 'string_value', ([], {'name': '"""quoted-whitespace-with-spacing"""', 'value': '\' "\\n \\r \\t" \'', 'strip_quotes': '(True)', 'strip_whitespace': '(True)', 'empty_ok': '(True)'}), '(name=\'quoted-whitespace-with-spacing\', value=\' "\\n \\r \\t" \',\n strip_quotes=True, strip_whitespace=True, empty_ok=True)\n', (4271, 4393), False, 'from hed_utils.support.checked_param import string_value\n'), ((4560, 4695), 'hed_utils.support.checked_param.string_value', 'string_value', ([], {'name': '"""quoted-whitespace-with-spacing"""', 'value': '\' "\\n \\r \\t" \'', 'strip_quotes': '(True)', 'strip_whitespace': '(True)', 'empty_ok': '(False)'}), '(name=\'quoted-whitespace-with-spacing\', value=\' "\\n \\r \\t" \',\n strip_quotes=True, strip_whitespace=True, empty_ok=False)\n', (4572, 4695), False, 'from hed_utils.support.checked_param import string_value\n'), ((5729, 5767), 'hed_utils.support.checked_param.file_path', 'file_path', (['name', 'value'], {'readable': '(False)'}), '(name, value, readable=False)\n', (5738, 5767), False, 'from hed_utils.support.checked_param import file_path\n'), ((5849, 5886), 'hed_utils.support.checked_param.file_path', 'file_path', (['name', 'value'], {'readable': '(True)'}), '(name, value, readable=True)\n', (5858, 5886), False, 'from hed_utils.support.checked_param import file_path\n'), ((6204, 6242), 'hed_utils.support.checked_param.file_path', 'file_path', (['name', 'value'], {'readable': '(False)'}), '(name, value, readable=False)\n', (6213, 6242), False, 'from hed_utils.support.checked_param import file_path\n'), ((6324, 6361), 'hed_utils.support.checked_param.file_path', 'file_path', (['name', 'value'], {'readable': '(True)'}), '(name, value, readable=True)\n', (6333, 6361), False, 'from hed_utils.support.checked_param import file_path\n'), ((6677, 6715), 'hed_utils.support.checked_param.file_path', 'file_path', (['name', 'value'], {'readable': '(False)'}), '(name, value, readable=False)\n', (6686, 6715), False, 'from hed_utils.support.checked_param import file_path\n'), ((6797, 6834), 'hed_utils.support.checked_param.file_path', 'file_path', (['name', 'value'], {'readable': '(True)'}), '(name, value, readable=True)\n', (6806, 6834), False, 'from hed_utils.support.checked_param import file_path\n'), ((5357, 5404), 'logging.error', 'logging.error', (['"""tearDown error!"""'], {'exc_info': '(True)'}), "('tearDown error!', exc_info=True)\n", (5370, 5404), False, 'import logging\n')] |
from util.DateUtil import DateUtil
from db.SqlExecutor import SqlExecutor
class HAVCache:
def __init__(self):
self.db = SqlExecutor(db_name='gpp-long-term.db')
# check if we already have cached data for the provided date
def has_data_for_date(self, ticker, date, no_update_if_today=False):
found_date = self.get_last_retrieved(ticker)
# if no found date, then it isn't in the cache at all
if found_date is None:
return False
# found_date is saturday or sunday and is today, don't update cache
if DateUtil.dates_match(date, found_date) and DateUtil.is_weekend(date) and DateUtil.is_today(date):
return True
# if the date is today and it isn't the weekend, we need to update our cache always
if DateUtil.is_today(date) and not no_update_if_today:
return False
# if the date in the metadata is greater than the requested date
# we already have data for this date, otherwise we need to go get it
return found_date > date or (no_update_if_today and DateUtil.is_today(date))
def store_result_meta_data(self, ticker, last_retrieved):
found = self.get_last_retrieved(ticker)
# if there's already a metadata record, just update it
if found is not None:
sql = 'UPDATE `HISTORIC_META_DATA` SET LAST_RETRIEVED=? WHERE TICKER=?'
self.db.exec_insert(sql, (last_retrieved, ticker))
else:
sql = 'INSERT INTO `HISTORIC_META_DATA` (TICKER, LAST_RETRIEVED) VALUES (?, ?)'
self.db.exec_insert(sql, (ticker, last_retrieved))
def store_result_data(self, ticker, date, payload):
sql = 'INSERT INTO `HISTORIC_DATA` (TICKER, DATE, OPEN, HIGH, LOW, CLOSE, VOLUME) ' \
'VALUES(?, ?, ?, ?, ?, ?, ?)'
# check to make sure we're not overwriting something
data = self.get_daily_quote(ticker, date)
if data is not None:
self.db.exec_insert('DELETE FROM `HISTORIC_DATA` WHERE `TICKER`=? AND `DATE`=?', (ticker, date))
to_send = (ticker, date)
for item in payload:
to_send = to_send + (item,)
self.db.exec_insert(sql, to_send)
# Checks whether specific date is actually in the cache
def check_cache(self, ticker, date):
# don't try the DB before we know if the data will be there
if not self.has_data_for_date(ticker, date):
return None
result = self.get_daily_quote(ticker, date)
if result is None:
return None
return {'ticker': result[0], 'date': result[1], 'open': result[2],
'high': result[3], 'low': result[4], 'close': result[5], 'volume': result[6]}
def get_last_retrieved(self, ticker):
sql = 'SELECT * FROM `HISTORIC_META_DATA` WHERE TICKER=?'
result = self.db.exec_select(sql, (ticker,)).fetchone()
if result is None:
return None
found_timestamp = result[1]
return found_timestamp
def get_all_data(self, ticker):
sql = 'SELECT * FROM `HISTORIC_DATA` WHERE TICKER=?'
result = self.db.exec_select(sql, (ticker,)).fetchall()
return result
def get_rolling_window_quotes(self, ticker, end_date, num_desired):
if not self.has_data_for_date(ticker, end_date, no_update_if_today=True):
return None
sql = 'SELECT * FROM `HISTORIC_DATA` WHERE TICKER=? AND DATE <= ? ORDER BY DATE DESC LIMIT ?'
result = self.db.exec_select(sql, (ticker, end_date, num_desired)).fetchall()
return result
def get_daily_quote(self, ticker, date):
sql = 'SELECT * FROM `HISTORIC_DATA` WHERE TICKER=? AND DATE=?'
result = self.db.exec_select(sql, (ticker, date)).fetchone()
return result
def flush(self, ticker):
sql = 'DELETE FROM `HISTORIC_DATA` WHERE TICKER=?'
self.db.exec_insert(sql, (ticker,))
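# Usage sketch (hypothetical ticker/date values, not from the original source):
#   cache = HAVCache()
#   if cache.has_data_for_date('MSFT', '2020-01-02'):
#       quote = cache.check_cache('MSFT', '2020-01-02')  # dict of OHLCV, or None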
| [
"util.DateUtil.DateUtil.is_weekend",
"db.SqlExecutor.SqlExecutor",
"util.DateUtil.DateUtil.is_today",
"util.DateUtil.DateUtil.dates_match"
] | [((134, 173), 'db.SqlExecutor.SqlExecutor', 'SqlExecutor', ([], {'db_name': '"""gpp-long-term.db"""'}), "(db_name='gpp-long-term.db')\n", (145, 173), False, 'from db.SqlExecutor import SqlExecutor\n'), ((572, 610), 'util.DateUtil.DateUtil.dates_match', 'DateUtil.dates_match', (['date', 'found_date'], {}), '(date, found_date)\n', (592, 610), False, 'from util.DateUtil import DateUtil\n'), ((615, 640), 'util.DateUtil.DateUtil.is_weekend', 'DateUtil.is_weekend', (['date'], {}), '(date)\n', (634, 640), False, 'from util.DateUtil import DateUtil\n'), ((645, 668), 'util.DateUtil.DateUtil.is_today', 'DateUtil.is_today', (['date'], {}), '(date)\n', (662, 668), False, 'from util.DateUtil import DateUtil\n'), ((798, 821), 'util.DateUtil.DateUtil.is_today', 'DateUtil.is_today', (['date'], {}), '(date)\n', (815, 821), False, 'from util.DateUtil import DateUtil\n'), ((1086, 1109), 'util.DateUtil.DateUtil.is_today', 'DateUtil.is_today', (['date'], {}), '(date)\n', (1103, 1109), False, 'from util.DateUtil import DateUtil\n')] |
# Lint as: python3
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import socket
import sys
import time
import json
# to create a new init config file, change the 0 in the guard below to 1
if (0):
init_config = {}
init_config['ip']= '127.0.0.1'
init_config['port']= 2000
init_config['labels']=['person', 'dog']
init_config['debug']=1
init_config['trigger_in']='1\n'
init_config['trigger_out']='1'
init_config['trigger_out_reset']='0'
init_config['inference_modules']=['zcoral']
with open('tcp_in_out/init_config.json', 'w') as outfile:
json.dump(init_config, outfile)
class Modulo:
def __init__(self):
None
def start(self,nombre,local_data, out_data):
out_data[nombre]['error'] = {}
nfile='modulos/' + local_data['modName'] +'/'
if (len(local_data['args'])==0):
nfile = nfile+ 'init_config.json'
else:
nfile = nfile + local_data['args']
with open(nfile) as json_file:
self.init_data = json.load(json_file)
out_data[nombre]['t_count'] = 0
local_data['ip'] = self.init_data['ip']
local_data['port'] = self.init_data['port']
self.init_net(nombre, local_data, out_data)
local_data['t_last_send'] = time.time()
local_data['t_last_send_max'] = 100
local_data['debug'] = self.init_data['debug']
local_data['labels'] = self.init_data['labels']
local_data['trigger_in']=self.init_data['trigger_in']
local_data['trigger_out']=self.init_data['trigger_out']
local_data['trigger_out_reset']=self.init_data['trigger_out_reset']
local_data['inference_modules']=self.init_data['inference_modules']
local_data['time'] = time.time()
def work(self,nombre,local_data, out_data):
try:
data_in = local_data['sock'].recv(1024)
data_in=data_in.decode()
print(data_in)
            # If the trigger edge was received, send back the reset value ('0')
if data_in == local_data['trigger_in']:
self.send_data(nombre, local_data, out_data, local_data['trigger_out_reset'])
out_data[nombre]['t_count'] = out_data[nombre]['t_count'] + 1
except:
None
try:
detected = 0
            for i in local_data['inference_modules']:  # iterate over all detection modules
                for n in out_data[i]['detected']:  # iterate over the cameras
                    for d in out_data[i]['detected'][n]:  # iterate over the detections
#for label in d[label]:
for label_local in local_data['labels']:
if d['label'] == label_local:
detected = 1
if detected:
self.send_data(nombre, local_data, out_data, local_data['trigger_out'])
except:
None
if (time.time()>(1+local_data['time'])):
self.send_data(nombre, local_data, out_data, 'alive')
local_data['time'] = time.time()
def onError(self,nombre,local_data, out_data):
self.init_net(nombre, local_data, out_data)
def event (self, nombre, local, out, event, event_sync):
None
def end (self, nombre, local_data, out_data):
None
def init_net(self, nombre, local_data, out_data):
if len (out_data[nombre]['error'].keys()):
local_data['sock'].close()
time.sleep(5)
try:
local_data['sock'] = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_data['sock'].connect((local_data['ip'],local_data['port']))
local_data['sock'].settimeout(0.0001)
out_data[nombre]['error'].pop(1)
except:
None
def send_data (self, nombre, local_data, out_data, data):
try:
local_data['sock'].send(data.encode())
except:
            out_data[nombre]['error'][1]='Network error'
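# Lifecycle sketch (assumed host-framework contract, inferred from the methods
# above): the framework instantiates Modulo, calls start() once with shared
# local/out dicts, then calls work() in a loop; onError() re-establishes the
# TCP connection whenever out_data[nombre]['error'] is non-empty.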
| [
"socket.socket",
"time.sleep",
"json.load",
"time.time",
"json.dump"
] | [((1091, 1122), 'json.dump', 'json.dump', (['init_config', 'outfile'], {}), '(init_config, outfile)\n', (1100, 1122), False, 'import json\n'), ((1802, 1813), 'time.time', 'time.time', ([], {}), '()\n', (1811, 1813), False, 'import time\n'), ((2275, 2286), 'time.time', 'time.time', ([], {}), '()\n', (2284, 2286), False, 'import time\n'), ((1540, 1560), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1549, 1560), False, 'import json\n'), ((3458, 3469), 'time.time', 'time.time', ([], {}), '()\n', (3467, 3469), False, 'import time\n'), ((3594, 3605), 'time.time', 'time.time', ([], {}), '()\n', (3603, 3605), False, 'import time\n'), ((4014, 4027), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4024, 4027), False, 'import time\n'), ((4087, 4136), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (4100, 4136), False, 'import socket\n')] |
"""
<NAME>
University of Manitoba
August 30th, 2021
"""
import os
import numpy as np
from umbms import get_proj_path, verify_path, get_script_logger
from umbms.loadsave import load_pickle, save_pickle
from umbms.beamform.iqms import get_scr, get_loc_err, get_scr_healthy
from umbms.beamform.extras import apply_ant_t_delay
###############################################################################
__DATA_DIR = os.path.join(get_proj_path(), 'data/umbmid/g3/')
__OUT_DIR = os.path.join(get_proj_path(), 'output/g3/')
verify_path(__OUT_DIR)
# Scan frequency parameters
__INI_F = 1e9
__FIN_F = 9e9
__N_FS = 1001
# Image size
__M_SIZE = 150
# Approximate radius of each adipose shell in our array
__ADI_RADS = {
'A1': 0.05,
'A2': 0.06,
'A3': 0.07,
'A11': 0.06,
'A12': 0.05,
'A13': 0.065,
'A14': 0.06,
'A15': 0.055,
'A16': 0.07
}
# Assumed tumour radius for healthy reconstructions where the SCR
# threshold is exceeded
__HEALTHY_RAD = 0.015
# The str indicating the reference type for the tumour-containing scans
# must be in ['adi', 'fib']
__TUM_REF_STR = 'adi'
###############################################################################
# Define RGB colours for plotting DAS/DMAS/ORR
das_col = [0, 0, 0]
dmas_col = [80, 80, 80]
gd_col = [160, 160, 160]
das_col = [ii / 255 for ii in das_col]
dmas_col = [ii / 255 for ii in dmas_col]
gd_col = [ii / 255 for ii in gd_col]
###############################################################################
if __name__ == "__main__":
logger = get_script_logger(__file__)
# Load the metadata for all scans
metadata = load_pickle(os.path.join(__DATA_DIR,
'metadata_gen_three.pickle'))
n_expts = len(metadata) # Find number of experiments / scans
# Scan freqs and target freqs
scan_fs = np.linspace(__INI_F, __FIN_F, __N_FS)
# Retain only frequencies above 2 GHz, due to antenna VSWR
tar_fs = scan_fs >= 2e9
# The directory where reconstructed images are stored
img_dir = os.path.join(__OUT_DIR, 'recons/')
# The SCR thresholds to be investigated
scr_thresholds = np.linspace(0, 30, 1000)
# Init arrays for storing the sensitivity and specificity at each
# SCR threshold, for each of the three reconstruction algorithms
das_sensitivities = np.zeros_like(scr_thresholds)
dmas_sensitivities = np.zeros_like(scr_thresholds)
orr_sensitivities = np.zeros_like(scr_thresholds)
das_specificities = np.zeros_like(scr_thresholds)
dmas_specificities = np.zeros_like(scr_thresholds)
orr_specificities = np.zeros_like(scr_thresholds)
# For each SCR threshold...
for scr_ii in range(len(scr_thresholds)):
        logger.info('SCR threshold [%3d / %3d]'
% (scr_ii + 1, len(scr_thresholds)))
# Get the SCR threshold here
__SCR_THRESHOLD = scr_thresholds[scr_ii]
# Init list for storing all metadata
all_md = []
# Init lists for storing the SCR and localization errors, for
# each beamforming method
das_scrs = []
das_les = []
dmas_scrs = []
dmas_les = []
orr_scrs = []
orr_les = []
# Init lists for storing the detects and healthy detects for
# each beamformer
das_detects = []
dmas_detects = []
orr_detects = []
das_healthy_detects = []
dmas_healthy_detects = []
orr_healthy_detects = []
# Make output dir for figures
fig_out_dir = os.path.join(__OUT_DIR, 'iqms/')
verify_path(fig_out_dir)
# For each experiment / scan
for ii in range(n_expts):
# Get the metadata for this scan
tar_md = metadata[ii]
# If the scan had a fibroglandular shell (indicating it was of
# a complete tumour-containing or healthy phantom)
if 'F' in tar_md['phant_id'] and ~np.isnan(tar_md['tum_diam']):
# Use the fibroglandular reference scan
tar_img_dir = os.path.join(img_dir, 'id-%d-%s/'
% (tar_md['id'], __TUM_REF_STR))
# Load the ORR reconstructions (at each step)
orr_imgs = load_pickle(os.path.join(tar_img_dir,
'img_estimates.pickle'))
orr_img = orr_imgs[-1] # Examine the final image
# Load the DAS and DMAS reconstructions
das_img = load_pickle(os.path.join(tar_img_dir,
'das_%s.pickle'
% __TUM_REF_STR))
dmas_img = load_pickle(os.path.join(tar_img_dir,
'dmas_%s.pickle'
% __TUM_REF_STR))
# Get metadata for plotting
scan_rad = tar_md['ant_rad'] / 100
tum_x = tar_md['tum_x'] / 100
tum_y = tar_md['tum_y'] / 100
tum_rad = 0.5 * (tar_md['tum_diam'] / 100)
adi_rad = __ADI_RADS[tar_md['phant_id'].split('F')[0]]
# Correct for the antenna radius measurement position
# (measured from point on antenna stand, not from SMA
# connection location)
scan_rad += 0.03618
# Define the radius of the region of interest
roi_rad = adi_rad + 0.01
# Correct for the antenna time delay
ant_rad = apply_ant_t_delay(scan_rad=scan_rad, new_ant=True)
# Get the SCR and localization error for the DAS image
das_scr, das_d_scr = get_scr(img=das_img, roi_rad=roi_rad,
adi_rad=adi_rad,
tum_rad=tum_rad,
tum_x=tum_x, tum_y=tum_y)
das_le = get_loc_err(img=das_img, ant_rad=roi_rad,
tum_x=tum_x, tum_y=tum_y)
# Get the SCR and localization error for the DMAS image
dmas_scr, dmas_d_scr = get_scr(img=dmas_img, roi_rad=roi_rad,
adi_rad=adi_rad,
tum_rad=tum_rad,
tum_x=tum_x, tum_y=tum_y)
dmas_le = get_loc_err(img=dmas_img, ant_rad=roi_rad,
tum_x=tum_x, tum_y=tum_y)
# Get the SCR and localization error for the ORR image
orr_scr, orr_d_scr = get_scr(img=orr_img, roi_rad=roi_rad,
adi_rad=adi_rad,
tum_rad=tum_rad,
tum_x=tum_x, tum_y=tum_y)
orr_le = get_loc_err(img=orr_img, ant_rad=roi_rad,
tum_x=tum_x, tum_y=tum_y)
# Store the results
das_scrs.append((das_scr, das_d_scr))
das_les.append(das_le)
dmas_scrs.append((dmas_scr, dmas_d_scr))
dmas_les.append(dmas_le)
orr_scrs.append((orr_scr, orr_d_scr))
orr_les.append(orr_le)
# Use the tumour detection criteria to determine if
# a tumour was *accurately* (i.e., the 'detected tumor'
# corresponds to the true tumor) detected in the
# reconstructions
das_detect = (das_scr >= __SCR_THRESHOLD
and das_le <= (tum_rad + 0.005))
dmas_detect = (dmas_scr >= __SCR_THRESHOLD
and dmas_le <= (tum_rad + 0.005))
orr_detect = (orr_scr >= __SCR_THRESHOLD
and orr_le <= (tum_rad + 0.005))
# Store the true detection results
orr_detects.append(orr_detect)
das_detects.append(das_detect)
dmas_detects.append(dmas_detect)
all_md.append(tar_md)
# If the experiment was of a healthy phantom
elif 'F' in tar_md['phant_id'] and np.isnan(tar_md['tum_diam']):
# Get the directory for this image
tar_img_dir = os.path.join(img_dir, 'id-%d-adi/'
% tar_md['id'])
# Load the ORR reconstructions (at each step)
orr_imgs = load_pickle(os.path.join(tar_img_dir,
'img_estimates.pickle'))
orr_img = orr_imgs[-1] # Examine final reconstruction
# Load the DAS and DMAS reconstructions
das_img = load_pickle(os.path.join(tar_img_dir,
'das_adi.pickle'))
dmas_img = load_pickle(os.path.join(tar_img_dir,
'dmas_adi.pickle'))
# Get metadata for plotting
scan_rad = tar_md['ant_rad'] / 100
tum_x = tar_md['tum_x'] / 100
tum_y = tar_md['tum_y'] / 100
tum_rad = 0.5 * (tar_md['tum_diam'] / 100)
adi_rad = __ADI_RADS[tar_md['phant_id'].split('F')[0]]
# Correct for the antenna radius measurement position
# (measured from point on antenna stand, not from SMA
# connection location)
scan_rad += 0.03618
# Define the region of interest
roi_rad = adi_rad + 0.01
# Correct for the antenna time delay
ant_rad = apply_ant_t_delay(scan_rad=scan_rad, new_ant=True)
# Get the SCR for DAS
das_scr, das_d_scr = get_scr_healthy(img=np.abs(das_img),
roi_rad=roi_rad,
adi_rad=adi_rad,
ant_rad=roi_rad,
healthy_rad=__HEALTHY_RAD)
# Get the SCR for DMAS
dmas_scr, dmas_d_scr = get_scr_healthy(img=np.abs(dmas_img),
roi_rad=roi_rad,
adi_rad=adi_rad,
ant_rad=roi_rad,
healthy_rad=__HEALTHY_RAD)
# Get the SCR for ORR
orr_scr, orr_d_scr = get_scr_healthy(img=np.abs(orr_img),
roi_rad=roi_rad,
adi_rad=adi_rad,
ant_rad=roi_rad,
healthy_rad=__HEALTHY_RAD)
# Determine if a tumour was detected in each image
das_detect = das_scr >= __SCR_THRESHOLD
dmas_detect = dmas_scr >= __SCR_THRESHOLD
orr_detect = orr_scr >= __SCR_THRESHOLD
# Store the detection results
das_healthy_detects.append(das_detect)
dmas_healthy_detects.append(dmas_detect)
orr_healthy_detects.append(orr_detect)
# Calculate and store the sensitivities
das_sensitivities[scr_ii] = (100 * np.sum(das_detects)
/ len(das_detects))
dmas_sensitivities[scr_ii] = (100 * np.sum(dmas_detects)
/ len(dmas_detects))
orr_sensitivities[scr_ii] = (100 * np.sum(orr_detects)
/ len(orr_detects))
# Calculate and store the specificities
das_specificities[scr_ii] = \
(100 * np.sum(1 - np.array(das_healthy_detects))
/ len(das_healthy_detects))
dmas_specificities[scr_ii] = \
(100 * np.sum(1 - np.array(dmas_healthy_detects))
/ len(dmas_healthy_detects))
orr_specificities[scr_ii] = \
(100 * np.sum(1 - np.array(orr_healthy_detects))
/ len(orr_healthy_detects))
# Report the sensitivities and specificities at this SCR
# threshold to the logger
logger.info('--------------------------------------------------------')
# Report DAS
logger.info('\tDAS Sensitivity:\t%.2f%%' % das_sensitivities[scr_ii])
logger.info('\tDAS Specificity:\t%.2f%%' % das_specificities[scr_ii])
# Report DMAS
logger.info('\tDMAS Sensitivity:\t%.2f%%' % dmas_sensitivities[scr_ii])
logger.info('\tDMAS Specificity:\t%.2f%%' % dmas_specificities[scr_ii])
# Report ORR
logger.info('\tORR Sensitivity:\t\t%.2f%%' % orr_sensitivities[scr_ii])
logger.info('\tORR Specificity:\t\t%.2f%%' % orr_specificities[scr_ii])
    # Save the sensitivities and specificities (as a function of the
# SCR threshold) to .pickle files
save_pickle((scr_thresholds, das_sensitivities,
dmas_sensitivities, orr_sensitivities),
os.path.join(__OUT_DIR, '%s_ref_sensitvities.pickle'
% __TUM_REF_STR))
save_pickle((scr_thresholds, das_specificities,
dmas_specificities, orr_specificities),
os.path.join(__OUT_DIR, '%s_ref_specificities.pickle'
% __TUM_REF_STR))
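    # For reference (standard definitions; these match the np.sum(...)
    # expressions computed above at each SCR threshold):
    #   sensitivity = 100 * n_true_detections / n_tumour_scans
    #   specificity = 100 * n_correct_rejections / n_healthy_scans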
| [
"umbms.beamform.iqms.get_scr",
"umbms.verify_path",
"numpy.abs",
"umbms.beamform.iqms.get_loc_err",
"umbms.beamform.extras.apply_ant_t_delay",
"os.path.join",
"umbms.get_proj_path",
"numpy.sum",
"numpy.linspace",
"numpy.array",
"numpy.isnan",
"umbms.get_script_logger",
"numpy.zeros_like"
] | [((528, 550), 'umbms.verify_path', 'verify_path', (['__OUT_DIR'], {}), '(__OUT_DIR)\n', (539, 550), False, 'from umbms import get_proj_path, verify_path, get_script_logger\n'), ((435, 450), 'umbms.get_proj_path', 'get_proj_path', ([], {}), '()\n', (448, 450), False, 'from umbms import get_proj_path, verify_path, get_script_logger\n'), ((497, 512), 'umbms.get_proj_path', 'get_proj_path', ([], {}), '()\n', (510, 512), False, 'from umbms import get_proj_path, verify_path, get_script_logger\n'), ((1551, 1578), 'umbms.get_script_logger', 'get_script_logger', (['__file__'], {}), '(__file__)\n', (1568, 1578), False, 'from umbms import get_proj_path, verify_path, get_script_logger\n'), ((1856, 1893), 'numpy.linspace', 'np.linspace', (['__INI_F', '__FIN_F', '__N_FS'], {}), '(__INI_F, __FIN_F, __N_FS)\n', (1867, 1893), True, 'import numpy as np\n'), ((2059, 2093), 'os.path.join', 'os.path.join', (['__OUT_DIR', '"""recons/"""'], {}), "(__OUT_DIR, 'recons/')\n", (2071, 2093), False, 'import os\n'), ((2160, 2184), 'numpy.linspace', 'np.linspace', (['(0)', '(30)', '(1000)'], {}), '(0, 30, 1000)\n', (2171, 2184), True, 'import numpy as np\n'), ((2349, 2378), 'numpy.zeros_like', 'np.zeros_like', (['scr_thresholds'], {}), '(scr_thresholds)\n', (2362, 2378), True, 'import numpy as np\n'), ((2404, 2433), 'numpy.zeros_like', 'np.zeros_like', (['scr_thresholds'], {}), '(scr_thresholds)\n', (2417, 2433), True, 'import numpy as np\n'), ((2458, 2487), 'numpy.zeros_like', 'np.zeros_like', (['scr_thresholds'], {}), '(scr_thresholds)\n', (2471, 2487), True, 'import numpy as np\n'), ((2512, 2541), 'numpy.zeros_like', 'np.zeros_like', (['scr_thresholds'], {}), '(scr_thresholds)\n', (2525, 2541), True, 'import numpy as np\n'), ((2567, 2596), 'numpy.zeros_like', 'np.zeros_like', (['scr_thresholds'], {}), '(scr_thresholds)\n', (2580, 2596), True, 'import numpy as np\n'), ((2621, 2650), 'numpy.zeros_like', 'np.zeros_like', (['scr_thresholds'], {}), '(scr_thresholds)\n', (2634, 2650), True, 'import numpy as np\n'), ((1645, 1698), 'os.path.join', 'os.path.join', (['__DATA_DIR', '"""metadata_gen_three.pickle"""'], {}), "(__DATA_DIR, 'metadata_gen_three.pickle')\n", (1657, 1698), False, 'import os\n'), ((3558, 3590), 'os.path.join', 'os.path.join', (['__OUT_DIR', '"""iqms/"""'], {}), "(__OUT_DIR, 'iqms/')\n", (3570, 3590), False, 'import os\n'), ((3599, 3623), 'umbms.verify_path', 'verify_path', (['fig_out_dir'], {}), '(fig_out_dir)\n', (3610, 3623), False, 'from umbms import get_proj_path, verify_path, get_script_logger\n'), ((13455, 13524), 'os.path.join', 'os.path.join', (['__OUT_DIR', "('%s_ref_sensitvities.pickle' % __TUM_REF_STR)"], {}), "(__OUT_DIR, '%s_ref_sensitvities.pickle' % __TUM_REF_STR)\n", (13467, 13524), False, 'import os\n'), ((13680, 13750), 'os.path.join', 'os.path.join', (['__OUT_DIR', "('%s_ref_specificities.pickle' % __TUM_REF_STR)"], {}), "(__OUT_DIR, '%s_ref_specificities.pickle' % __TUM_REF_STR)\n", (13692, 13750), False, 'import os\n'), ((4078, 4144), 'os.path.join', 'os.path.join', (['img_dir', "('id-%d-%s/' % (tar_md['id'], __TUM_REF_STR))"], {}), "(img_dir, 'id-%d-%s/' % (tar_md['id'], __TUM_REF_STR))\n", (4090, 4144), False, 'import os\n'), ((5638, 5688), 'umbms.beamform.extras.apply_ant_t_delay', 'apply_ant_t_delay', ([], {'scan_rad': 'scan_rad', 'new_ant': '(True)'}), '(scan_rad=scan_rad, new_ant=True)\n', (5655, 5688), False, 'from umbms.beamform.extras import apply_ant_t_delay\n'), ((5798, 5899), 'umbms.beamform.iqms.get_scr', 'get_scr', ([], {'img': 'das_img', 'roi_rad': 'roi_rad', 
'adi_rad': 'adi_rad', 'tum_rad': 'tum_rad', 'tum_x': 'tum_x', 'tum_y': 'tum_y'}), '(img=das_img, roi_rad=roi_rad, adi_rad=adi_rad, tum_rad=tum_rad,\n tum_x=tum_x, tum_y=tum_y)\n', (5805, 5899), False, 'from umbms.beamform.iqms import get_scr, get_loc_err, get_scr_healthy\n'), ((6056, 6123), 'umbms.beamform.iqms.get_loc_err', 'get_loc_err', ([], {'img': 'das_img', 'ant_rad': 'roi_rad', 'tum_x': 'tum_x', 'tum_y': 'tum_y'}), '(img=das_img, ant_rad=roi_rad, tum_x=tum_x, tum_y=tum_y)\n', (6067, 6123), False, 'from umbms.beamform.iqms import get_scr, get_loc_err, get_scr_healthy\n'), ((6273, 6375), 'umbms.beamform.iqms.get_scr', 'get_scr', ([], {'img': 'dmas_img', 'roi_rad': 'roi_rad', 'adi_rad': 'adi_rad', 'tum_rad': 'tum_rad', 'tum_x': 'tum_x', 'tum_y': 'tum_y'}), '(img=dmas_img, roi_rad=roi_rad, adi_rad=adi_rad, tum_rad=tum_rad,\n tum_x=tum_x, tum_y=tum_y)\n', (6280, 6375), False, 'from umbms.beamform.iqms import get_scr, get_loc_err, get_scr_healthy\n'), ((6539, 6607), 'umbms.beamform.iqms.get_loc_err', 'get_loc_err', ([], {'img': 'dmas_img', 'ant_rad': 'roi_rad', 'tum_x': 'tum_x', 'tum_y': 'tum_y'}), '(img=dmas_img, ant_rad=roi_rad, tum_x=tum_x, tum_y=tum_y)\n', (6550, 6607), False, 'from umbms.beamform.iqms import get_scr, get_loc_err, get_scr_healthy\n'), ((6755, 6856), 'umbms.beamform.iqms.get_scr', 'get_scr', ([], {'img': 'orr_img', 'roi_rad': 'roi_rad', 'adi_rad': 'adi_rad', 'tum_rad': 'tum_rad', 'tum_x': 'tum_x', 'tum_y': 'tum_y'}), '(img=orr_img, roi_rad=roi_rad, adi_rad=adi_rad, tum_rad=tum_rad,\n tum_x=tum_x, tum_y=tum_y)\n', (6762, 6856), False, 'from umbms.beamform.iqms import get_scr, get_loc_err, get_scr_healthy\n'), ((7013, 7080), 'umbms.beamform.iqms.get_loc_err', 'get_loc_err', ([], {'img': 'orr_img', 'ant_rad': 'roi_rad', 'tum_x': 'tum_x', 'tum_y': 'tum_y'}), '(img=orr_img, ant_rad=roi_rad, tum_x=tum_x, tum_y=tum_y)\n', (7024, 7080), False, 'from umbms.beamform.iqms import get_scr, get_loc_err, get_scr_healthy\n'), ((11712, 11731), 'numpy.sum', 'np.sum', (['das_detects'], {}), '(das_detects)\n', (11718, 11731), True, 'import numpy as np\n'), ((11833, 11853), 'numpy.sum', 'np.sum', (['dmas_detects'], {}), '(dmas_detects)\n', (11839, 11853), True, 'import numpy as np\n'), ((11956, 11975), 'numpy.sum', 'np.sum', (['orr_detects'], {}), '(orr_detects)\n', (11962, 11975), True, 'import numpy as np\n'), ((3961, 3989), 'numpy.isnan', 'np.isnan', (["tar_md['tum_diam']"], {}), "(tar_md['tum_diam'])\n", (3969, 3989), True, 'import numpy as np\n'), ((4290, 4339), 'os.path.join', 'os.path.join', (['tar_img_dir', '"""img_estimates.pickle"""'], {}), "(tar_img_dir, 'img_estimates.pickle')\n", (4302, 4339), False, 'import os\n'), ((4554, 4612), 'os.path.join', 'os.path.join', (['tar_img_dir', "('das_%s.pickle' % __TUM_REF_STR)"], {}), "(tar_img_dir, 'das_%s.pickle' % __TUM_REF_STR)\n", (4566, 4612), False, 'import os\n'), ((4755, 4814), 'os.path.join', 'os.path.join', (['tar_img_dir', "('dmas_%s.pickle' % __TUM_REF_STR)"], {}), "(tar_img_dir, 'dmas_%s.pickle' % __TUM_REF_STR)\n", (4767, 4814), False, 'import os\n'), ((8382, 8410), 'numpy.isnan', 'np.isnan', (["tar_md['tum_diam']"], {}), "(tar_md['tum_diam'])\n", (8390, 8410), True, 'import numpy as np\n'), ((8494, 8544), 'os.path.join', 'os.path.join', (['img_dir', "('id-%d-adi/' % tar_md['id'])"], {}), "(img_dir, 'id-%d-adi/' % tar_md['id'])\n", (8506, 8544), False, 'import os\n'), ((9896, 9946), 'umbms.beamform.extras.apply_ant_t_delay', 'apply_ant_t_delay', ([], {'scan_rad': 'scan_rad', 'new_ant': '(True)'}), '(scan_rad=scan_rad, 
new_ant=True)\n', (9913, 9946), False, 'from umbms.beamform.extras import apply_ant_t_delay\n'), ((8690, 8739), 'os.path.join', 'os.path.join', (['tar_img_dir', '"""img_estimates.pickle"""'], {}), "(tar_img_dir, 'img_estimates.pickle')\n", (8702, 8739), False, 'import os\n'), ((8959, 9002), 'os.path.join', 'os.path.join', (['tar_img_dir', '"""das_adi.pickle"""'], {}), "(tar_img_dir, 'das_adi.pickle')\n", (8971, 9002), False, 'import os\n'), ((9094, 9138), 'os.path.join', 'os.path.join', (['tar_img_dir', '"""dmas_adi.pickle"""'], {}), "(tar_img_dir, 'dmas_adi.pickle')\n", (9106, 9138), False, 'import os\n'), ((12149, 12178), 'numpy.array', 'np.array', (['das_healthy_detects'], {}), '(das_healthy_detects)\n', (12157, 12178), True, 'import numpy as np\n'), ((12290, 12320), 'numpy.array', 'np.array', (['dmas_healthy_detects'], {}), '(dmas_healthy_detects)\n', (12298, 12320), True, 'import numpy as np\n'), ((12432, 12461), 'numpy.array', 'np.array', (['orr_healthy_detects'], {}), '(orr_healthy_detects)\n', (12440, 12461), True, 'import numpy as np\n'), ((10043, 10058), 'numpy.abs', 'np.abs', (['das_img'], {}), '(das_img)\n', (10049, 10058), True, 'import numpy as np\n'), ((10449, 10465), 'numpy.abs', 'np.abs', (['dmas_img'], {}), '(dmas_img)\n', (10455, 10465), True, 'import numpy as np\n'), ((10861, 10876), 'numpy.abs', 'np.abs', (['orr_img'], {}), '(orr_img)\n', (10867, 10876), True, 'import numpy as np\n')] |
import numpy
import log_reg_funcs
def predict(theta, x_data):
"""
Predict whether a student will be admitted.
Args:
        x_data: array shape(m, n+1)
theta: ndarray, the optimal parameters of the cost function
Returns:
predicted: array shape(m,) of booleans
"""
probability = log_reg_funcs.sigmoid_function(numpy.dot(x_data, theta))
predicted = probability >= 0.5
return predicted
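# Usage sketch (hypothetical shapes, intercept column already included):
#   theta: shape (n+1,), x_data: shape (m, n+1)
#   admitted = predict(theta, x_data)  # boolean array of shape (m,)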
| [
"numpy.dot"
] | [((347, 371), 'numpy.dot', 'numpy.dot', (['x_data', 'theta'], {}), '(x_data, theta)\n', (356, 371), False, 'import numpy\n')] |
import click
import context
from completions import get_local_dumps
import os
import util
help_text = """(du) Push/upload database dump to server."""
@click.command('push', help=help_text)
@click.option('-v', '--verbose', is_flag=True)
@click.argument('dump', type=click.STRING, autocompletion=get_local_dumps)
@click.pass_context
def push_dump(ctx, dump, verbose):
project_id = ctx.obj['config'].get('id')
local_dump = ctx.obj['db'].get_dump_filename(dump)
if local_dump is None:
util.output_error('Database dump not found in project {}: {}'.format(ctx.obj['config'].get('id'), dump))
ctx.obj['ssh'].connect()
basename = os.path.basename(local_dump)
remote_dumps_dir = ctx.obj['ssh'].get_remote_dir(project_id, 'dumps')
remote_filename = os.path.join(remote_dumps_dir, basename)
ctx.obj['ssh'].upload_file(local_dump, remote_filename)
util.output_success('Uploaded local database dump {} to server.'.format(basename))
@click.command(name='du', help=help_text, hidden=True)
@click.option('-v', '--verbose', is_flag=True)
@click.argument('dump', type=click.STRING, autocompletion=get_local_dumps)
@click.pass_context
def push_dump_alias(ctx, dump, verbose):
context.init(ctx)
context.init_project(ctx)
ctx.invoke(push_dump, dump=dump, verbose=verbose)
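# Invocation sketch (assuming these commands are registered on the CLI's click
# group; tool and file names are illustrative):
#   $ mytool push mydump.sql    # long form
#   $ mytool du mydump.sql      # hidden short alias, same behaviour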
| [
"click.argument",
"context.init",
"click.option",
"context.init_project",
"os.path.join",
"os.path.basename",
"click.command"
] | [((154, 191), 'click.command', 'click.command', (['"""push"""'], {'help': 'help_text'}), "('push', help=help_text)\n", (167, 191), False, 'import click\n'), ((193, 238), 'click.option', 'click.option', (['"""-v"""', '"""--verbose"""'], {'is_flag': '(True)'}), "('-v', '--verbose', is_flag=True)\n", (205, 238), False, 'import click\n'), ((240, 313), 'click.argument', 'click.argument', (['"""dump"""'], {'type': 'click.STRING', 'autocompletion': 'get_local_dumps'}), "('dump', type=click.STRING, autocompletion=get_local_dumps)\n", (254, 313), False, 'import click\n'), ((972, 1025), 'click.command', 'click.command', ([], {'name': '"""du"""', 'help': 'help_text', 'hidden': '(True)'}), "(name='du', help=help_text, hidden=True)\n", (985, 1025), False, 'import click\n'), ((1027, 1072), 'click.option', 'click.option', (['"""-v"""', '"""--verbose"""'], {'is_flag': '(True)'}), "('-v', '--verbose', is_flag=True)\n", (1039, 1072), False, 'import click\n'), ((1074, 1147), 'click.argument', 'click.argument', (['"""dump"""'], {'type': 'click.STRING', 'autocompletion': 'get_local_dumps'}), "('dump', type=click.STRING, autocompletion=get_local_dumps)\n", (1088, 1147), False, 'import click\n'), ((655, 683), 'os.path.basename', 'os.path.basename', (['local_dump'], {}), '(local_dump)\n', (671, 683), False, 'import os\n'), ((780, 820), 'os.path.join', 'os.path.join', (['remote_dumps_dir', 'basename'], {}), '(remote_dumps_dir, basename)\n', (792, 820), False, 'import os\n'), ((1213, 1230), 'context.init', 'context.init', (['ctx'], {}), '(ctx)\n', (1225, 1230), False, 'import context\n'), ((1235, 1260), 'context.init_project', 'context.init_project', (['ctx'], {}), '(ctx)\n', (1255, 1260), False, 'import context\n')] |
from __future__ import annotations
import warnings
from collections import abc
from dataclasses import dataclass
from pathlib import Path
from typing import DefaultDict, Dict, Optional, Sequence, Tuple, Type, Union, cast
from qtpy import QT_VERSION
from qtpy.QtCore import QObject, QPoint, QRect, QSize, Qt
from qtpy.QtGui import (
QColor,
QFont,
QFontDatabase,
QGuiApplication,
QIcon,
QIconEngine,
QPainter,
QPixmap,
QPixmapCache,
QTransform,
)
from qtpy.QtWidgets import QApplication, QStyleOption, QWidget
from typing_extensions import TypedDict
from ..utils import QMessageHandler
from ._animations import Animation
class Unset:
def __repr__(self) -> str:
return "UNSET"
_Unset = Unset()
# A 16 pixel-high icon yields a font size of 14, which is pixel perfect
# for font-awesome. 16 * 0.875 = 14
# The reason why the glyph size is smaller than the icon size is to
# account for font bearing.
DEFAULT_SCALING_FACTOR = 0.875
DEFAULT_OPACITY = 1
ValidColor = Union[
QColor,
int,
str,
Qt.GlobalColor,
Tuple[int, int, int, int],
Tuple[int, int, int],
None,
]
StateOrMode = Union[QIcon.State, QIcon.Mode]
StateModeKey = Union[StateOrMode, str, Sequence[StateOrMode]]
_SM_MAP: Dict[str, StateOrMode] = {
"on": QIcon.State.On,
"off": QIcon.State.Off,
"normal": QIcon.Mode.Normal,
"active": QIcon.Mode.Active,
"selected": QIcon.Mode.Selected,
"disabled": QIcon.Mode.Disabled,
}
def _norm_state_mode(key: StateModeKey) -> Tuple[QIcon.State, QIcon.Mode]:
"""return state/mode tuple given a variety of valid inputs.
Input can be either a string, or a sequence of state or mode enums.
Strings can be any combination of on, off, normal, active, selected, disabled,
    separated by underscore.
"""
_sm: Sequence[StateOrMode]
if isinstance(key, str):
try:
_sm = [_SM_MAP[k.lower()] for k in key.split("_")]
except KeyError:
raise ValueError(
f"{key!r} is not a valid state key, must be a combination of {{on, "
"off, active, disabled, selected, normal} separated by underscore"
)
else:
_sm = key if isinstance(key, abc.Sequence) else [key] # type: ignore
state = next((i for i in _sm if isinstance(i, QIcon.State)), QIcon.State.Off)
mode = next((i for i in _sm if isinstance(i, QIcon.Mode)), QIcon.Mode.Normal)
return state, mode
class IconOptionDict(TypedDict, total=False):
glyph_key: str
scale_factor: float
color: ValidColor
opacity: float
animation: Optional[Animation]
transform: Optional[QTransform]
# public facing, for a nicer IDE experience than a dict
# The difference between IconOpts and _IconOptions is that all of IconOpts
# all default to `_Unset` and are intended to extend some base/default option
# IconOpts are *not* guaranteed to be fully capable of rendering an icon, whereas
# IconOptions are.
@dataclass
class IconOpts:
glyph_key: Union[str, Unset] = _Unset
scale_factor: Union[float, Unset] = _Unset
color: Union[ValidColor, Unset] = _Unset
opacity: Union[float, Unset] = _Unset
animation: Union[Animation, Unset, None] = _Unset
transform: Union[QTransform, Unset, None] = _Unset
def dict(self) -> IconOptionDict:
# not using asdict due to pickle errors on animation
d = {k: v for k, v in vars(self).items() if v is not _Unset}
return cast(IconOptionDict, d)
@dataclass
class _IconOptions:
"""The set of options needed to render a font in a single State/Mode."""
glyph_key: str
scale_factor: float = DEFAULT_SCALING_FACTOR
color: ValidColor = None
opacity: float = DEFAULT_OPACITY
animation: Optional[Animation] = None
transform: Optional[QTransform] = None
def _update(self, icon_opts: IconOpts) -> _IconOptions:
return _IconOptions(**{**vars(self), **icon_opts.dict()})
def dict(self) -> IconOptionDict:
# not using asdict due to pickle errors on animation
return cast(IconOptionDict, vars(self))
class _QFontIconEngine(QIconEngine):
_opt_hash: str = ""
def __init__(self, options: _IconOptions):
super().__init__()
self._opts: DefaultDict[
QIcon.State, Dict[QIcon.Mode, Optional[_IconOptions]]
] = DefaultDict(dict)
self._opts[QIcon.State.Off][QIcon.Mode.Normal] = options
self.update_hash()
@property
def _default_opts(self) -> _IconOptions:
return cast(_IconOptions, self._opts[QIcon.State.Off][QIcon.Mode.Normal])
def _add_opts(self, state: QIcon.State, mode: QIcon.Mode, opts: IconOpts) -> None:
self._opts[state][mode] = self._default_opts._update(opts)
self.update_hash()
def clone(self) -> QIconEngine: # pragma: no cover
ico = _QFontIconEngine(self._default_opts)
ico._opts = self._opts.copy()
return ico
def _get_opts(self, state: QIcon.State, mode: QIcon.Mode) -> _IconOptions:
opts = self._opts[state].get(mode)
if opts:
return opts
opp_state = QIcon.State.Off if state == QIcon.State.On else QIcon.State.On
if mode in (QIcon.Mode.Disabled, QIcon.Mode.Selected):
opp_mode = (
QIcon.Mode.Disabled
if mode == QIcon.Mode.Selected
else QIcon.Mode.Selected
)
for m, s in [
(QIcon.Mode.Normal, state),
(QIcon.Mode.Active, state),
(mode, opp_state),
(QIcon.Mode.Normal, opp_state),
(QIcon.Mode.Active, opp_state),
(opp_mode, state),
(opp_mode, opp_state),
]:
opts = self._opts[s].get(m)
if opts:
return opts
else:
opp_mode = (
QIcon.Mode.Active if mode == QIcon.Mode.Normal else QIcon.Mode.Normal
)
for m, s in [
(opp_mode, state),
(mode, opp_state),
(opp_mode, opp_state),
(QIcon.Mode.Disabled, state),
(QIcon.Mode.Selected, state),
(QIcon.Mode.Disabled, opp_state),
(QIcon.Mode.Selected, opp_state),
]:
opts = self._opts[s].get(m)
if opts:
return opts
return self._default_opts
def paint(
self,
painter: QPainter,
rect: QRect,
mode: QIcon.Mode,
state: QIcon.State,
) -> None:
opts = self._get_opts(state, mode)
char, family, style = QFontIconStore.key2glyph(opts.glyph_key)
# font
font = QFont()
        font.setFamily(family)  # set separately for Qt6
font.setPixelSize(round(rect.height() * opts.scale_factor))
if style:
font.setStyleName(style)
# color
if isinstance(opts.color, tuple):
color_args = opts.color
else:
color_args = (opts.color,) if opts.color else () # type: ignore
# animation
if opts.animation is not None:
opts.animation.animate(painter)
        # transform
if opts.transform is not None:
painter.setTransform(opts.transform, True)
painter.save()
painter.setPen(QColor(*color_args))
painter.setOpacity(opts.opacity)
painter.setFont(font)
with QMessageHandler(): # avoid "Populating font family aliases" warning
painter.drawText(rect, Qt.AlignmentFlag.AlignCenter, char)
painter.restore()
def pixmap(self, size: QSize, mode: QIcon.Mode, state: QIcon.State) -> QPixmap:
# first look in cache
pmckey = self._pmcKey(size, mode, state)
pm = QPixmapCache.find(pmckey) if pmckey else None
if pm:
return pm
pixmap = QPixmap(size)
if not size.isValid():
return pixmap
pixmap.fill(Qt.GlobalColor.transparent)
painter = QPainter(pixmap)
self.paint(painter, QRect(QPoint(0, 0), size), mode, state)
painter.end()
# Apply palette-based styles for disabled/selected modes
# unless the user has specifically set a color for this mode/state
if mode != QIcon.Mode.Normal:
ico_opts = self._opts[state].get(mode)
if not ico_opts or not ico_opts.color:
opt = QStyleOption()
opt.palette = QGuiApplication.palette()
generated = QApplication.style().generatedIconPixmap(mode, pixmap, opt)
if not generated.isNull():
pixmap = generated
if pmckey and not pixmap.isNull():
QPixmapCache.insert(pmckey, pixmap)
return pixmap
def _pmcKey(self, size: QSize, mode: QIcon.Mode, state: QIcon.State) -> str:
# Qt6-style enums
if self._get_opts(state, mode).animation:
return ""
if hasattr(mode, "value"):
mode = mode.value
if hasattr(state, "value"):
state = state.value
k = ((((((size.width()) << 11) | size.height()) << 11) | mode) << 4) | state
return f"$superqt_{self._opt_hash}_{hex(k)}"
def update_hash(self) -> None:
hsh = id(self)
for state, d in self._opts.items():
for mode, opts in d.items():
if not opts:
continue
hsh += hash(
hash(opts.glyph_key) + hash(opts.color) + hash(state) + hash(mode)
)
self._opt_hash = hex(hsh)
class QFontIcon(QIcon):
def __init__(self, options: _IconOptions) -> None:
self._engine = _QFontIconEngine(options)
super().__init__(self._engine)
def addState(
self,
state: QIcon.State = QIcon.State.Off,
mode: QIcon.Mode = QIcon.Mode.Normal,
glyph_key: Union[str, Unset] = _Unset,
scale_factor: Union[float, Unset] = _Unset,
color: Union[ValidColor, Unset] = _Unset,
opacity: Union[float, Unset] = _Unset,
animation: Union[Animation, Unset, None] = _Unset,
transform: Union[QTransform, Unset, None] = _Unset,
) -> None:
"""Set icon options for a specific mode/state."""
if glyph_key is not _Unset:
QFontIconStore.key2glyph(glyph_key) # type: ignore
_opts = IconOpts(
glyph_key=glyph_key,
scale_factor=scale_factor,
color=color,
opacity=opacity,
animation=animation,
transform=transform,
)
self._engine._add_opts(state, mode, _opts)
class QFontIconStore(QObject):
# map of key -> (font_family, font_style)
_LOADED_KEYS: Dict[str, Tuple[str, Optional[str]]] = dict()
# map of (font_family, font_style) -> character (char may include key)
_CHARMAPS: Dict[Tuple[str, Optional[str]], Dict[str, str]] = dict()
# singleton instance, use `instance()` to retrieve
__instance: Optional[QFontIconStore] = None
def __init__(self, parent: Optional[QObject] = None) -> None:
super().__init__(parent=parent)
# QT6 drops this
dpi = getattr(Qt.ApplicationAttribute, "AA_UseHighDpiPixmaps", None)
if dpi:
QApplication.setAttribute(dpi)
@classmethod
def instance(cls) -> QFontIconStore:
if cls.__instance is None:
cls.__instance = cls()
return cls.__instance
@classmethod
def clear(cls) -> None:
cls._LOADED_KEYS.clear()
cls._CHARMAPS.clear()
QFontDatabase.removeAllApplicationFonts()
@classmethod
def _key2family(cls, key: str) -> Tuple[str, Optional[str]]:
"""Return (family, style) given a font `key`"""
key = key.split(".", maxsplit=1)[0]
if key not in cls._LOADED_KEYS:
from . import _plugins
try:
font_cls = _plugins.get_font_class(key)
result = cls.addFont(
font_cls.__font_file__, key, charmap=font_cls.__dict__
)
if not result: # pragma: no cover
raise Exception("Invalid font file")
cls._LOADED_KEYS[key] = result
except ValueError as e:
raise ValueError(
f"Unrecognized font key: {key!r}.\n"
f"Known plugin keys include: {_plugins.available()}.\n"
f"Loaded keys include: {list(cls._LOADED_KEYS)}."
) from e
return cls._LOADED_KEYS[key]
@classmethod
def _ensure_char(cls, char: str, family: str, style: str) -> str:
"""make sure that `char` is a glyph provided by `family` and `style`."""
if len(char) == 1 and ord(char) > 256:
return char
try:
charmap = cls._CHARMAPS[(family, style)]
except KeyError:
raise KeyError(f"No charmap registered for font '{family} ({style})'")
if char in charmap:
# split in case the charmap includes the key
return charmap[char].split(".", maxsplit=1)[-1]
ident = _ensure_identifier(char)
if ident in charmap:
return charmap[ident].split(".", maxsplit=1)[-1]
ident = f"{char!r} or {ident!r}" if char != ident else repr(ident)
raise ValueError(f"Font '{family} ({style})' has no glyph with the key {ident}")
@classmethod
def key2glyph(cls, glyph_key: str) -> tuple[str, str, Optional[str]]:
"""Return (char, family, style) given a `glyph_key`"""
if "." not in glyph_key:
raise ValueError("Glyph key must contain a period")
font_key, char = glyph_key.split(".", maxsplit=1)
family, style = cls._key2family(font_key)
char = cls._ensure_char(char, family, style)
return char, family, style
@classmethod
def addFont(
cls, filepath: str, prefix: str, charmap: Optional[Dict[str, str]] = None
) -> Optional[Tuple[str, str]]:
"""Add font at `filepath` to the registry under `key`.
If you'd like to later use a fontkey in the form of `key.some-name`, then
`charmap` must be provided and provide a mapping for all of the glyph names
to their unicode numbers. If a charmap is not provided, glyphs must be directly
accessed with their unicode as something like `key.\uffff`.
Parameters
----------
filepath : str
Path to an OTF or TTF file containing the fonts
        prefix : str
A key that will represent this font file when used for lookup. For example,
'fa5s' for 'Font-Awesome 5 Solid'.
charmap : Dict[str, str], optional
optional mapping for all of the glyph names to their unicode numbers.
See note above.
Returns
-------
Tuple[str, str], optional
font-family and font-style for the file just registered, or None if
something goes wrong.
"""
if prefix in cls._LOADED_KEYS:
warnings.warn(f"Prefix {prefix} already loaded")
return
if not Path(filepath).exists():
raise FileNotFoundError(f"Font file doesn't exist: {filepath}")
if QApplication.instance() is None:
raise RuntimeError("Please create QApplication before adding a Font")
fontId = QFontDatabase.addApplicationFont(str(Path(filepath).absolute()))
if fontId < 0: # pragma: no cover
warnings.warn(f"Cannot load font file: {filepath}")
return None
families = QFontDatabase.applicationFontFamilies(fontId)
if not families: # pragma: no cover
warnings.warn(f"Font file is empty!: {filepath}")
return None
family: str = families[0]
# in Qt6, everything becomes a static member
QFd: Union[QFontDatabase, Type[QFontDatabase]] = (
QFontDatabase() # type: ignore
if tuple(QT_VERSION.split(".")) < ("6", "0")
else QFontDatabase
)
styles = QFd.styles(family) # type: ignore
style: str = styles[-1] if styles else ""
if not QFd.isSmoothlyScalable(family, style): # pragma: no cover
warnings.warn(
f"Registered font {family} ({style}) is not smoothly scalable. "
"Icons may not look attractive."
)
cls._LOADED_KEYS[prefix] = (family, style)
if charmap:
cls._CHARMAPS[(family, style)] = charmap
return (family, style)
def icon(
self,
glyph_key: str,
*,
scale_factor: float = DEFAULT_SCALING_FACTOR,
color: ValidColor = None,
opacity: float = 1,
animation: Optional[Animation] = None,
transform: Optional[QTransform] = None,
states: Dict[str, Union[IconOptionDict, IconOpts]] = {},
) -> QFontIcon:
self.key2glyph(glyph_key) # make sure it's a valid glyph_key
default_opts = _IconOptions(
glyph_key=glyph_key,
scale_factor=scale_factor,
color=color,
opacity=opacity,
animation=animation,
transform=transform,
)
icon = QFontIcon(default_opts)
for kw, options in states.items():
if isinstance(options, IconOpts):
options = default_opts._update(options).dict()
icon.addState(*_norm_state_mode(kw), **options)
return icon
def setTextIcon(
self, widget: QWidget, glyph_key: str, size: Optional[float] = None
) -> None:
"""Sets text on a widget to a specific font & glyph.
This is an alternative to setting a QIcon with a pixmap. It may
be easier to combine with dynamic stylesheets.
"""
setText = getattr(widget, "setText", None)
if not setText: # pragma: no cover
            raise TypeError(f"Object does not have a setText method: {widget}")
glyph = self.key2glyph(glyph_key)[0]
size = size or DEFAULT_SCALING_FACTOR
size = size if size > 1 else widget.height() * size
widget.setFont(self.font(glyph_key, int(size)))
setText(glyph)
def font(self, font_prefix: str, size: Optional[int] = None) -> QFont:
"""Create QFont for `font_prefix`"""
font_key, _ = font_prefix.split(".", maxsplit=1)
family, style = self._key2family(font_key)
font = QFont()
font.setFamily(family)
if style:
font.setStyleName(style)
if size:
font.setPixelSize(int(size))
return font
def _ensure_identifier(name: str) -> str:
"""Normalize string to valid identifier"""
import keyword
if not name:
return ""
# add _ to beginning of names starting with numbers
if name[0].isdigit():
name = f"_{name}"
# add _ to end of reserved keywords
if keyword.iskeyword(name):
name += "_"
# replace dashes and spaces with underscores
name = name.replace("-", "_").replace(" ", "_")
assert str.isidentifier(name), f"Could not canonicalize name: {name}"
return name
| [
"qtpy.QtGui.QPixmap",
"qtpy.QtWidgets.QStyleOption",
"qtpy.QtWidgets.QApplication.instance",
"qtpy.QtGui.QPixmapCache.find",
"qtpy.QtGui.QFontDatabase.applicationFontFamilies",
"qtpy.QtGui.QFontDatabase.removeAllApplicationFonts",
"pathlib.Path",
"keyword.iskeyword",
"warnings.warn",
"qtpy.QtGui.QColor",
"qtpy.QtCore.QPoint",
"qtpy.QtWidgets.QApplication.setAttribute",
"qtpy.QtGui.QFontDatabase",
"qtpy.QtGui.QFont",
"typing.cast",
"qtpy.QtGui.QPainter",
"qtpy.QtGui.QGuiApplication.palette",
"typing.DefaultDict",
"qtpy.QT_VERSION.split",
"qtpy.QtGui.QPixmapCache.insert",
"qtpy.QtWidgets.QApplication.style"
] | [((19084, 19107), 'keyword.iskeyword', 'keyword.iskeyword', (['name'], {}), '(name)\n', (19101, 19107), False, 'import keyword\n'), ((3474, 3497), 'typing.cast', 'cast', (['IconOptionDict', 'd'], {}), '(IconOptionDict, d)\n', (3478, 3497), False, 'from typing import DefaultDict, Dict, Optional, Sequence, Tuple, Type, Union, cast\n'), ((4352, 4369), 'typing.DefaultDict', 'DefaultDict', (['dict'], {}), '(dict)\n', (4363, 4369), False, 'from typing import DefaultDict, Dict, Optional, Sequence, Tuple, Type, Union, cast\n'), ((4537, 4603), 'typing.cast', 'cast', (['_IconOptions', 'self._opts[QIcon.State.Off][QIcon.Mode.Normal]'], {}), '(_IconOptions, self._opts[QIcon.State.Off][QIcon.Mode.Normal])\n', (4541, 4603), False, 'from typing import DefaultDict, Dict, Optional, Sequence, Tuple, Type, Union, cast\n'), ((6769, 6776), 'qtpy.QtGui.QFont', 'QFont', ([], {}), '()\n', (6774, 6776), False, 'from qtpy.QtGui import QColor, QFont, QFontDatabase, QGuiApplication, QIcon, QIconEngine, QPainter, QPixmap, QPixmapCache, QTransform\n'), ((7957, 7970), 'qtpy.QtGui.QPixmap', 'QPixmap', (['size'], {}), '(size)\n', (7964, 7970), False, 'from qtpy.QtGui import QColor, QFont, QFontDatabase, QGuiApplication, QIcon, QIconEngine, QPainter, QPixmap, QPixmapCache, QTransform\n'), ((8094, 8110), 'qtpy.QtGui.QPainter', 'QPainter', (['pixmap'], {}), '(pixmap)\n', (8102, 8110), False, 'from qtpy.QtGui import QColor, QFont, QFontDatabase, QGuiApplication, QIcon, QIconEngine, QPainter, QPixmap, QPixmapCache, QTransform\n'), ((11683, 11724), 'qtpy.QtGui.QFontDatabase.removeAllApplicationFonts', 'QFontDatabase.removeAllApplicationFonts', ([], {}), '()\n', (11722, 11724), False, 'from qtpy.QtGui import QColor, QFont, QFontDatabase, QGuiApplication, QIcon, QIconEngine, QPainter, QPixmap, QPixmapCache, QTransform\n'), ((15739, 15784), 'qtpy.QtGui.QFontDatabase.applicationFontFamilies', 'QFontDatabase.applicationFontFamilies', (['fontId'], {}), '(fontId)\n', (15776, 15784), False, 'from qtpy.QtGui import QColor, QFont, QFontDatabase, QGuiApplication, QIcon, QIconEngine, QPainter, QPixmap, QPixmapCache, QTransform\n'), ((18609, 18616), 'qtpy.QtGui.QFont', 'QFont', ([], {}), '()\n', (18614, 18616), False, 'from qtpy.QtGui import QColor, QFont, QFontDatabase, QGuiApplication, QIcon, QIconEngine, QPainter, QPixmap, QPixmapCache, QTransform\n'), ((7409, 7428), 'qtpy.QtGui.QColor', 'QColor', (['*color_args'], {}), '(*color_args)\n', (7415, 7428), False, 'from qtpy.QtGui import QColor, QFont, QFontDatabase, QGuiApplication, QIcon, QIconEngine, QPainter, QPixmap, QPixmapCache, QTransform\n'), ((7857, 7882), 'qtpy.QtGui.QPixmapCache.find', 'QPixmapCache.find', (['pmckey'], {}), '(pmckey)\n', (7874, 7882), False, 'from qtpy.QtGui import QColor, QFont, QFontDatabase, QGuiApplication, QIcon, QIconEngine, QPainter, QPixmap, QPixmapCache, QTransform\n'), ((8801, 8836), 'qtpy.QtGui.QPixmapCache.insert', 'QPixmapCache.insert', (['pmckey', 'pixmap'], {}), '(pmckey, pixmap)\n', (8820, 8836), False, 'from qtpy.QtGui import QColor, QFont, QFontDatabase, QGuiApplication, QIcon, QIconEngine, QPainter, QPixmap, QPixmapCache, QTransform\n'), ((11376, 11406), 'qtpy.QtWidgets.QApplication.setAttribute', 'QApplication.setAttribute', (['dpi'], {}), '(dpi)\n', (11401, 11406), False, 'from qtpy.QtWidgets import QApplication, QStyleOption, QWidget\n'), ((15194, 15242), 'warnings.warn', 'warnings.warn', (['f"""Prefix {prefix} already loaded"""'], {}), "(f'Prefix {prefix} already loaded')\n", (15207, 15242), False, 'import warnings\n'), ((15390, 15413), 'qtpy.QtWidgets.QApplication.instance', 'QApplication.instance', ([], {}), '()\n', (15411, 15413), False, 'from qtpy.QtWidgets import QApplication, QStyleOption, QWidget\n'), ((15643, 15694), 'warnings.warn', 'warnings.warn', (['f"""Cannot load font file: {filepath}"""'], {}), "(f'Cannot load font file: {filepath}')\n", (15656, 15694), False, 'import warnings\n'), ((15842, 15891), 'warnings.warn', 'warnings.warn', (['f"""Font file is empty!: {filepath}"""'], {}), "(f'Font file is empty!: {filepath}')\n", (15855, 15891), False, 'import warnings\n'), ((16075, 16090), 'qtpy.QtGui.QFontDatabase', 'QFontDatabase', ([], {}), '()\n', (16088, 16090), False, 'from qtpy.QtGui import QColor, QFont, QFontDatabase, QGuiApplication, QIcon, QIconEngine, QPainter, QPixmap, QPixmapCache, QTransform\n'), ((16394, 16513), 'warnings.warn', 'warnings.warn', (['f"""Registered font {family} ({style}) is not smoothly scalable. Icons may not look attractive."""'], {}), "(\n    f'Registered font {family} ({style}) is not smoothly scalable. Icons may not look attractive.'\n    )\n", (16407, 16513), False, 'import warnings\n'), ((8145, 8157), 'qtpy.QtCore.QPoint', 'QPoint', (['(0)', '(0)'], {}), '(0, 0)\n', (8151, 8157), False, 'from qtpy.QtCore import QObject, QPoint, QRect, QSize, Qt\n'), ((8504, 8518), 'qtpy.QtWidgets.QStyleOption', 'QStyleOption', ([], {}), '()\n', (8516, 8518), False, 'from qtpy.QtWidgets import QApplication, QStyleOption, QWidget\n'), ((8549, 8574), 'qtpy.QtGui.QGuiApplication.palette', 'QGuiApplication.palette', ([], {}), '()\n', (8572, 8574), False, 'from qtpy.QtGui import QColor, QFont, QFontDatabase, QGuiApplication, QIcon, QIconEngine, QPainter, QPixmap, QPixmapCache, QTransform\n'), ((15278, 15292), 'pathlib.Path', 'Path', (['filepath'], {}), '(filepath)\n', (15282, 15292), False, 'from pathlib import Path\n'), ((16128, 16149), 'qtpy.QT_VERSION.split', 'QT_VERSION.split', (['"""."""'], {}), "('.')\n", (16144, 16149), False, 'from qtpy import QT_VERSION\n'), ((8603, 8623), 'qtpy.QtWidgets.QApplication.style', 'QApplication.style', ([], {}), '()\n', (8621, 8623), False, 'from qtpy.QtWidgets import QApplication, QStyleOption, QWidget\n'), ((15560, 15574), 'pathlib.Path', 'Path', (['filepath'], {}), '(filepath)\n', (15564, 15574), False, 'from pathlib import Path\n')]
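The _pmcKey method above packs pixmap size, icon mode and state into one cache-key integer. A pure-Python illustration of that bit layout, assuming (as the shift widths imply) 11 bits each for width, height and mode, and 4 bits for state:
def pack_key(width, height, mode, state):
    # same layout as _pmcKey: width | height | mode | state, high to low bits
    return ((((width << 11) | height) << 11) | mode) << 4 | state

def unpack_key(k):
    # peel the fields back off, low bits first
    state = k & 0xF
    mode = (k >> 4) & 0x7FF
    height = (k >> 15) & 0x7FF
    width = k >> 26
    return width, height, mode, state

k = pack_key(32, 32, 0, 1)
assert unpack_key(k) == (32, 32, 0, 1)
print(hex(k))  # -> 0x80100001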
# Generated by Django 2.2.2 on 2019-06-17 03:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fair', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='author',
field=models.CharField(default='Andres', max_length=500, verbose_name='Autor'),
preserve_default=False,
),
]
| [
"django.db.models.CharField"
] | [((318, 390), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""Andres"""', 'max_length': '(500)', 'verbose_name': '"""Autor"""'}), "(default='Andres', max_length=500, verbose_name='Autor')\n", (334, 390), False, 'from django.db import migrations, models\n')] |
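For context on the AddField above: with preserve_default=False, Django uses default='Andres' only to back-fill rows that already exist at migration time, then drops the default from the field state. A sketch of the resulting model field (the Post model itself is not shown in the source):
from django.db import models

class Post(models.Model):
    # no default remains after the migration; 'Andres' was only
    # used to populate pre-existing rows
    author = models.CharField(max_length=500, verbose_name='Autor')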
import numpy as np
import pytest
import xarray as xr
from numpy.testing import assert_array_equal
from tiktorch.converters import (
NamedExplicitOutputShape,
NamedImplicitOutputShape,
NamedParametrizedShape,
input_shape_to_pb_input_shape,
numpy_to_pb_tensor,
output_shape_to_pb_output_shape,
pb_tensor_to_numpy,
pb_tensor_to_xarray,
xarray_to_pb_tensor,
)
from tiktorch.proto import inference_pb2
def _numpy_to_pb_tensor(arr):
"""
Makes sure that tensor was serialized/deserialized
"""
tensor = numpy_to_pb_tensor(arr)
parsed = inference_pb2.Tensor()
parsed.ParseFromString(tensor.SerializeToString())
return parsed
class TestNumpyToPBTensor:
def test_should_serialize_to_tensor_type(self):
arr = np.arange(9)
tensor = _numpy_to_pb_tensor(arr)
assert isinstance(tensor, inference_pb2.Tensor)
@pytest.mark.parametrize("np_dtype,dtype_str", [(np.int64, "int64"), (np.uint8, "uint8"), (np.float32, "float32")])
def test_should_have_dtype_as_str(self, np_dtype, dtype_str):
arr = np.arange(9, dtype=np_dtype)
tensor = _numpy_to_pb_tensor(arr)
assert arr.dtype == tensor.dtype
@pytest.mark.parametrize("shape", [(3, 3), (1,), (1, 1), (18, 20, 1)])
def test_should_have_shape(self, shape):
arr = np.zeros(shape)
tensor = _numpy_to_pb_tensor(arr)
assert tensor.shape
assert list(shape) == [dim.size for dim in tensor.shape]
def test_should_have_serialized_bytes(self):
arr = np.arange(9, dtype=np.uint8)
expected = bytes(arr)
tensor = _numpy_to_pb_tensor(arr)
assert expected == tensor.buffer
class TestPBTensorToNumpy:
def test_should_raise_on_empty_dtype(self):
tensor = inference_pb2.Tensor(dtype="", shape=[inference_pb2.NamedInt(size=1), inference_pb2.NamedInt(size=2)])
with pytest.raises(ValueError):
pb_tensor_to_numpy(tensor)
def test_should_raise_on_empty_shape(self):
tensor = inference_pb2.Tensor(dtype="int64", shape=[])
with pytest.raises(ValueError):
pb_tensor_to_numpy(tensor)
def test_should_return_ndarray(self):
arr = np.arange(9)
parsed = _numpy_to_pb_tensor(arr)
result_arr = pb_tensor_to_numpy(parsed)
assert isinstance(result_arr, np.ndarray)
@pytest.mark.parametrize("np_dtype,dtype_str", [(np.int64, "int64"), (np.uint8, "uint8"), (np.float32, "float32")])
def test_should_have_same_dtype(self, np_dtype, dtype_str):
arr = np.arange(9, dtype=np_dtype)
tensor = _numpy_to_pb_tensor(arr)
result_arr = pb_tensor_to_numpy(tensor)
assert arr.dtype == result_arr.dtype
@pytest.mark.parametrize("shape", [(3, 3), (1,), (1, 1), (18, 20, 1)])
def test_should_same_shape(self, shape):
arr = np.zeros(shape)
tensor = _numpy_to_pb_tensor(arr)
result_arr = pb_tensor_to_numpy(tensor)
assert arr.shape == result_arr.shape
@pytest.mark.parametrize("shape", [(3, 3), (1,), (1, 1), (18, 20, 1)])
def test_should_same_data(self, shape):
arr = np.random.random(shape)
tensor = _numpy_to_pb_tensor(arr)
result_arr = pb_tensor_to_numpy(tensor)
assert_array_equal(arr, result_arr)
class TestXarrayToPBTensor:
def to_pb_tensor(self, arr):
"""
Makes sure that tensor was serialized/deserialized
"""
tensor = xarray_to_pb_tensor(arr)
parsed = inference_pb2.Tensor()
parsed.ParseFromString(tensor.SerializeToString())
return parsed
def test_should_serialize_to_tensor_type(self):
xarr = xr.DataArray(np.arange(8).reshape((2, 4)), dims=("x", "y"))
pb_tensor = self.to_pb_tensor(xarr)
assert isinstance(pb_tensor, inference_pb2.Tensor)
assert len(pb_tensor.shape) == 2
dim1 = pb_tensor.shape[0]
dim2 = pb_tensor.shape[1]
assert dim1.size == 2
assert dim1.name == "x"
assert dim2.size == 4
assert dim2.name == "y"
@pytest.mark.parametrize("shape", [(3, 3), (1,), (1, 1), (18, 20, 1)])
def test_should_have_shape(self, shape):
arr = xr.DataArray(np.zeros(shape))
tensor = self.to_pb_tensor(arr)
assert tensor.shape
assert list(shape) == [dim.size for dim in tensor.shape]
def test_should_have_serialized_bytes(self):
arr = xr.DataArray(np.arange(9, dtype=np.uint8))
expected = bytes(arr.data)
tensor = self.to_pb_tensor(arr)
assert expected == tensor.buffer
class TestPBTensorToXarray:
def to_pb_tensor(self, arr):
"""
Makes sure that tensor was serialized/deserialized
"""
tensor = xarray_to_pb_tensor(arr)
parsed = inference_pb2.Tensor()
parsed.ParseFromString(tensor.SerializeToString())
return parsed
def test_should_raise_on_empty_dtype(self):
tensor = inference_pb2.Tensor(dtype="", shape=[inference_pb2.NamedInt(size=1), inference_pb2.NamedInt(size=2)])
with pytest.raises(ValueError):
pb_tensor_to_xarray(tensor)
def test_should_raise_on_empty_shape(self):
tensor = inference_pb2.Tensor(dtype="int64", shape=[])
with pytest.raises(ValueError):
pb_tensor_to_xarray(tensor)
def test_should_return_ndarray(self):
arr = xr.DataArray(np.arange(9))
parsed = self.to_pb_tensor(arr)
result_arr = pb_tensor_to_xarray(parsed)
assert isinstance(result_arr, xr.DataArray)
@pytest.mark.parametrize("np_dtype,dtype_str", [(np.int64, "int64"), (np.uint8, "uint8"), (np.float32, "float32")])
def test_should_have_same_dtype(self, np_dtype, dtype_str):
arr = xr.DataArray(np.arange(9, dtype=np_dtype))
tensor = self.to_pb_tensor(arr)
result_arr = pb_tensor_to_xarray(tensor)
assert arr.dtype == result_arr.dtype
@pytest.mark.parametrize("shape", [(3, 3), (1,), (1, 1), (18, 20, 1)])
def test_should_same_shape(self, shape):
arr = xr.DataArray(np.zeros(shape))
tensor = self.to_pb_tensor(arr)
result_arr = pb_tensor_to_xarray(tensor)
assert arr.shape == result_arr.shape
@pytest.mark.parametrize("shape", [(3, 3), (1,), (1, 1), (18, 20, 1)])
def test_should_same_data(self, shape):
arr = xr.DataArray(np.random.random(shape))
tensor = self.to_pb_tensor(arr)
result_arr = pb_tensor_to_xarray(tensor)
assert_array_equal(arr, result_arr)
class TestShapeConversions:
def to_named_explicit_shape(self, shape, axes, halo):
return NamedExplicitOutputShape(
halo=[(name, dim) for name, dim in zip(axes, halo)], shape=[(name, dim) for name, dim in zip(axes, shape)]
)
def to_named_implicit_shape(self, axes, halo, offset, scales, reference_tensor):
return NamedImplicitOutputShape(
halo=[(name, dim) for name, dim in zip(axes, halo)],
offset=[(name, dim) for name, dim in zip(axes, offset)],
scale=[(name, scale) for name, scale in zip(axes, scales)],
reference_tensor=reference_tensor,
)
def to_named_paramtrized_shape(self, min_shape, axes, step):
return NamedParametrizedShape(
min_shape=[(name, dim) for name, dim in zip(axes, min_shape)],
step_shape=[(name, dim) for name, dim in zip(axes, step)],
)
@pytest.mark.parametrize(
"shape,axes,halo",
[((42,), "x", (0,)), ((42, 128, 5), "abc", (1, 1, 1)), ((5, 4, 3, 2, 1, 42), "btzyxc", (1, 2, 3, 4, 5, 24))],
)
def test_explicit_output_shape(self, shape, axes, halo):
named_shape = self.to_named_explicit_shape(shape, axes, halo)
pb_shape = output_shape_to_pb_output_shape(named_shape)
assert pb_shape.shapeType == 0
assert pb_shape.referenceTensor == ""
assert len(pb_shape.scale.scales) == 0
assert len(pb_shape.offset.dims) == 0
assert [(d.name, d.size) for d in pb_shape.halo.dims] == [(name, size) for name, size in zip(axes, halo)]
assert [(d.name, d.size) for d in pb_shape.shape.dims] == [(name, size) for name, size in zip(axes, shape)]
@pytest.mark.parametrize(
"axes,halo,offset,scales,reference_tensor",
[("x", (0,), (10,), (1.0,), "forty-two"), ("abc", (1, 1, 1), (1, 2, 3), (1.0, 2.0, 3.0), "helloworld")],
)
def test_implicit_output_shape(self, axes, halo, offset, scales, reference_tensor):
named_shape = self.to_named_implicit_shape(axes, halo, offset, scales, reference_tensor)
pb_shape = output_shape_to_pb_output_shape(named_shape)
assert pb_shape.shapeType == 1
assert pb_shape.referenceTensor == reference_tensor
assert [(d.name, d.size) for d in pb_shape.scale.scales] == [(name, size) for name, size in zip(axes, scales)]
assert [(d.name, d.size) for d in pb_shape.offset.dims] == [(name, size) for name, size in zip(axes, offset)]
assert [(d.name, d.size) for d in pb_shape.halo.dims] == [(name, size) for name, size in zip(axes, halo)]
assert len(pb_shape.shape.dims) == 0
def test_output_shape_raises(self):
shape = [("a", 1)]
with pytest.raises(TypeError):
_ = output_shape_to_pb_output_shape(shape)
@pytest.mark.parametrize(
"shape,axes",
[((42,), "x"), ((42, 128, 5), "abc"), ((5, 4, 3, 2, 1, 42), "btzyxc")],
)
def test_explicit_input_shape(self, shape, axes):
named_shape = [(name, dim) for name, dim in zip(axes, shape)]
pb_shape = input_shape_to_pb_input_shape(named_shape)
assert pb_shape.shapeType == 0
assert [(d.name, d.size) for d in pb_shape.shape.dims] == [(name, size) for name, size in zip(axes, shape)]
@pytest.mark.parametrize(
"min_shape,axes,step",
[
((42,), "x", (5,)),
((42, 128, 5), "abc", (1, 2, 3)),
((5, 4, 3, 2, 1, 42), "btzyxc", (15, 24, 33, 42, 51, 642)),
],
)
def test_parametrized_input_shape(self, min_shape, axes, step):
named_shape = self.to_named_paramtrized_shape(min_shape, axes, step)
pb_shape = input_shape_to_pb_input_shape(named_shape)
assert pb_shape.shapeType == 1
assert [(d.name, d.size) for d in pb_shape.shape.dims] == [(name, size) for name, size in zip(axes, min_shape)]
assert [(d.name, d.size) for d in pb_shape.stepShape.dims] == [(name, size) for name, size in zip(axes, step)]
| [
"numpy.random.random",
"numpy.testing.assert_array_equal",
"tiktorch.proto.inference_pb2.NamedInt",
"tiktorch.converters.pb_tensor_to_numpy",
"tiktorch.converters.output_shape_to_pb_output_shape",
"pytest.mark.parametrize",
"numpy.zeros",
"tiktorch.converters.xarray_to_pb_tensor",
"pytest.raises",
"tiktorch.converters.pb_tensor_to_xarray",
"tiktorch.converters.input_shape_to_pb_input_shape",
"tiktorch.proto.inference_pb2.Tensor",
"numpy.arange",
"tiktorch.converters.numpy_to_pb_tensor"
] | [((550, 573), 'tiktorch.converters.numpy_to_pb_tensor', 'numpy_to_pb_tensor', (['arr'], {}), '(arr)\n', (568, 573), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((587, 609), 'tiktorch.proto.inference_pb2.Tensor', 'inference_pb2.Tensor', ([], {}), '()\n', (607, 609), False, 'from tiktorch.proto import inference_pb2\n'), ((895, 1014), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""np_dtype,dtype_str"""', "[(np.int64, 'int64'), (np.uint8, 'uint8'), (np.float32, 'float32')]"], {}), "('np_dtype,dtype_str', [(np.int64, 'int64'), (np.\n uint8, 'uint8'), (np.float32, 'float32')])\n", (918, 1014), False, 'import pytest\n'), ((1208, 1277), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(3, 3), (1,), (1, 1), (18, 20, 1)]'], {}), "('shape', [(3, 3), (1,), (1, 1), (18, 20, 1)])\n", (1231, 1277), False, 'import pytest\n'), ((2379, 2498), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""np_dtype,dtype_str"""', "[(np.int64, 'int64'), (np.uint8, 'uint8'), (np.float32, 'float32')]"], {}), "('np_dtype,dtype_str', [(np.int64, 'int64'), (np.\n uint8, 'uint8'), (np.float32, 'float32')])\n", (2402, 2498), False, 'import pytest\n'), ((2743, 2812), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(3, 3), (1,), (1, 1), (18, 20, 1)]'], {}), "('shape', [(3, 3), (1,), (1, 1), (18, 20, 1)])\n", (2766, 2812), False, 'import pytest\n'), ((3029, 3098), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(3, 3), (1,), (1, 1), (18, 20, 1)]'], {}), "('shape', [(3, 3), (1,), (1, 1), (18, 20, 1)])\n", (3052, 3098), False, 'import pytest\n'), ((4097, 4166), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(3, 3), (1,), (1, 1), (18, 20, 1)]'], {}), "('shape', [(3, 3), (1,), (1, 1), (18, 20, 1)])\n", (4120, 4166), False, 'import pytest\n'), ((5595, 5714), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""np_dtype,dtype_str"""', "[(np.int64, 'int64'), (np.uint8, 'uint8'), (np.float32, 'float32')]"], {}), "('np_dtype,dtype_str', [(np.int64, 'int64'), (np.\n uint8, 'uint8'), (np.float32, 'float32')])\n", (5618, 5714), False, 'import pytest\n'), ((5972, 6041), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(3, 3), (1,), (1, 1), (18, 20, 1)]'], {}), "('shape', [(3, 3), (1,), (1, 1), (18, 20, 1)])\n", (5995, 6041), False, 'import pytest\n'), ((6271, 6340), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(3, 3), (1,), (1, 1), (18, 20, 1)]'], {}), "('shape', [(3, 3), (1,), (1, 1), (18, 20, 1)])\n", (6294, 6340), False, 'import pytest\n'), ((7485, 7647), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape,axes,halo"""', "[((42,), 'x', (0,)), ((42, 128, 5), 'abc', (1, 1, 1)), ((5, 4, 3, 2, 1, 42), 'btzyxc', (1, 2, 3, 4, 5, 24))]"], {}), "('shape,axes,halo', [((42,), 'x', (0,)), ((42, 128, \n 5), 'abc', (1, 1, 1)), ((5, 4, 3, 2, 1, 42), 'btzyxc', (1, 2, 3, 4, 5, \n 24))])\n", (7508, 7647), False, 'import pytest\n'), ((8272, 8453), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axes,halo,offset,scales,reference_tensor"""', "[('x', (0,), (10,), (1.0,), 'forty-two'), ('abc', (1, 1, 1), (1, 2, 3), (\n 1.0, 2.0, 3.0), 'helloworld')]"], {}), "('axes,halo,offset,scales,reference_tensor', [('x',\n (0,), (10,), (1.0,), 'forty-two'), ('abc', (1, 1, 1), (1, 2, 3), (1.0, \n 2.0, 3.0), 'helloworld')])\n", (8295, 8453), False, 'import pytest\n'), ((9382, 9495), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape,axes"""', "[((42,), 'x'), ((42, 128, 5), 'abc'), ((5, 4, 3, 2, 1, 42), 'btzyxc')]"], {}), "('shape,axes', [((42,), 'x'), ((42, 128, 5), 'abc'),\n ((5, 4, 3, 2, 1, 42), 'btzyxc')])\n", (9405, 9495), False, 'import pytest\n'), ((9863, 10034), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_shape,axes,step"""', "[((42,), 'x', (5,)), ((42, 128, 5), 'abc', (1, 2, 3)), ((5, 4, 3, 2, 1, 42), 'btzyxc', (15, 24, 33, 42, 51, 642))]"], {}), "('min_shape,axes,step', [((42,), 'x', (5,)), ((42, \n 128, 5), 'abc', (1, 2, 3)), ((5, 4, 3, 2, 1, 42), 'btzyxc', (15, 24, 33,\n 42, 51, 642))])\n", (9886, 10034), False, 'import pytest\n'), ((778, 790), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (787, 790), True, 'import numpy as np\n'), ((1090, 1118), 'numpy.arange', 'np.arange', (['(9)'], {'dtype': 'np_dtype'}), '(9, dtype=np_dtype)\n', (1099, 1118), True, 'import numpy as np\n'), ((1337, 1352), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (1345, 1352), True, 'import numpy as np\n'), ((1552, 1580), 'numpy.arange', 'np.arange', (['(9)'], {'dtype': 'np.uint8'}), '(9, dtype=np.uint8)\n', (1561, 1580), True, 'import numpy as np\n'), ((2037, 2082), 'tiktorch.proto.inference_pb2.Tensor', 'inference_pb2.Tensor', ([], {'dtype': '"""int64"""', 'shape': '[]'}), "(dtype='int64', shape=[])\n", (2057, 2082), False, 'from tiktorch.proto import inference_pb2\n'), ((2219, 2231), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (2228, 2231), True, 'import numpy as np\n'), ((2295, 2321), 'tiktorch.converters.pb_tensor_to_numpy', 'pb_tensor_to_numpy', (['parsed'], {}), '(parsed)\n', (2313, 2321), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((2572, 2600), 'numpy.arange', 'np.arange', (['(9)'], {'dtype': 'np_dtype'}), '(9, dtype=np_dtype)\n', (2581, 2600), True, 'import numpy as np\n'), ((2664, 2690), 'tiktorch.converters.pb_tensor_to_numpy', 'pb_tensor_to_numpy', (['tensor'], {}), '(tensor)\n', (2682, 2690), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((2872, 2887), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (2880, 2887), True, 'import numpy as np\n'), ((2951, 2977), 'tiktorch.converters.pb_tensor_to_numpy', 'pb_tensor_to_numpy', (['tensor'], {}), '(tensor)\n', (2969, 2977), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((3157, 3180), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (3173, 3180), True, 'import numpy as np\n'), ((3244, 3270), 'tiktorch.converters.pb_tensor_to_numpy', 'pb_tensor_to_numpy', (['tensor'], {}), '(tensor)\n', (3262, 3270), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((3280, 3315), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['arr', 'result_arr'], {}), '(arr, result_arr)\n', (3298, 3315), False, 'from numpy.testing import assert_array_equal\n'), ((3479, 3503), 'tiktorch.converters.xarray_to_pb_tensor', 'xarray_to_pb_tensor', (['arr'], {}), '(arr)\n', (3498, 3503), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((3521, 3543), 'tiktorch.proto.inference_pb2.Tensor', 'inference_pb2.Tensor', ([], {}), '()\n', (3541, 3543), False, 'from tiktorch.proto import inference_pb2\n'), ((4776, 4800), 'tiktorch.converters.xarray_to_pb_tensor', 'xarray_to_pb_tensor', (['arr'], {}), '(arr)\n', (4795, 4800), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((4818, 4840), 'tiktorch.proto.inference_pb2.Tensor', 'inference_pb2.Tensor', ([], {}), '()\n', (4838, 4840), False, 'from tiktorch.proto import inference_pb2\n'), ((5237, 5282), 'tiktorch.proto.inference_pb2.Tensor', 'inference_pb2.Tensor', ([], {'dtype': '"""int64"""', 'shape': '[]'}), "(dtype='int64', shape=[])\n", (5257, 5282), False, 'from tiktorch.proto import inference_pb2\n'), ((5508, 5535), 'tiktorch.converters.pb_tensor_to_xarray', 'pb_tensor_to_xarray', (['parsed'], {}), '(parsed)\n', (5527, 5535), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((5892, 5919), 'tiktorch.converters.pb_tensor_to_xarray', 'pb_tensor_to_xarray', (['tensor'], {}), '(tensor)\n', (5911, 5919), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((6192, 6219), 'tiktorch.converters.pb_tensor_to_xarray', 'pb_tensor_to_xarray', (['tensor'], {}), '(tensor)\n', (6211, 6219), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((6498, 6525), 'tiktorch.converters.pb_tensor_to_xarray', 'pb_tensor_to_xarray', (['tensor'], {}), '(tensor)\n', (6517, 6525), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((6534, 6569), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['arr', 'result_arr'], {}), '(arr, result_arr)\n', (6552, 6569), False, 'from numpy.testing import assert_array_equal\n'), ((7811, 7855), 'tiktorch.converters.output_shape_to_pb_output_shape', 'output_shape_to_pb_output_shape', (['named_shape'], {}), '(named_shape)\n', (7842, 7855), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((8672, 8716), 'tiktorch.converters.output_shape_to_pb_output_shape', 'output_shape_to_pb_output_shape', (['named_shape'], {}), '(named_shape)\n', (8703, 8716), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((9658, 9700), 'tiktorch.converters.input_shape_to_pb_input_shape', 'input_shape_to_pb_input_shape', (['named_shape'], {}), '(named_shape)\n', (9687, 9700), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((10260, 10302), 'tiktorch.converters.input_shape_to_pb_input_shape', 'input_shape_to_pb_input_shape', (['named_shape'], {}), '(named_shape)\n', (10289, 10302), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((1905, 1930), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1918, 1930), False, 'import pytest\n'), ((1944, 1970), 'tiktorch.converters.pb_tensor_to_numpy', 'pb_tensor_to_numpy', (['tensor'], {}), '(tensor)\n', (1962, 1970), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((2096, 2121), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2109, 2121), False, 'import pytest\n'), ((2135, 2161), 'tiktorch.converters.pb_tensor_to_numpy', 'pb_tensor_to_numpy', (['tensor'], {}), '(tensor)\n', (2153, 2161), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((4239, 4254), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (4247, 4254), True, 'import numpy as np\n'), ((4466, 4494), 'numpy.arange', 'np.arange', (['(9)'], {'dtype': 'np.uint8'}), '(9, dtype=np.uint8)\n', (4475, 4494), True, 'import numpy as np\n'), ((5104, 5129), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5117, 5129), False, 'import pytest\n'), ((5143, 5170), 'tiktorch.converters.pb_tensor_to_xarray', 'pb_tensor_to_xarray', (['tensor'], {}), '(tensor)\n', (5162, 5170), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((5296, 5321), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5309, 5321), False, 'import pytest\n'), ((5335, 5362), 'tiktorch.converters.pb_tensor_to_xarray', 'pb_tensor_to_xarray', (['tensor'], {}), '(tensor)\n', (5354, 5362), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((5433, 5445), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (5442, 5445), True, 'import numpy as np\n'), ((5801, 5829), 'numpy.arange', 'np.arange', (['(9)'], {'dtype': 'np_dtype'}), '(9, dtype=np_dtype)\n', (5810, 5829), True, 'import numpy as np\n'), ((6114, 6129), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (6122, 6129), True, 'import numpy as np\n'), ((6412, 6435), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (6428, 6435), True, 'import numpy as np\n'), ((9295, 9319), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (9308, 9319), False, 'import pytest\n'), ((9337, 9375), 'tiktorch.converters.output_shape_to_pb_output_shape', 'output_shape_to_pb_output_shape', (['shape'], {}), '(shape)\n', (9368, 9375), False, 'from tiktorch.converters import NamedExplicitOutputShape, NamedImplicitOutputShape, NamedParametrizedShape, input_shape_to_pb_input_shape, numpy_to_pb_tensor, output_shape_to_pb_output_shape, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor\n'), ((1827, 1857), 'tiktorch.proto.inference_pb2.NamedInt', 'inference_pb2.NamedInt', ([], {'size': '(1)'}), '(size=1)\n', (1849, 1857), False, 'from tiktorch.proto import inference_pb2\n'), ((1859, 1889), 'tiktorch.proto.inference_pb2.NamedInt', 'inference_pb2.NamedInt', ([], {'size': '(2)'}), '(size=2)\n', (1881, 1889), False, 'from tiktorch.proto import inference_pb2\n'), ((3706, 3718), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (3715, 3718), True, 'import numpy as np\n'), ((5026, 5056), 'tiktorch.proto.inference_pb2.NamedInt', 'inference_pb2.NamedInt', ([], {'size': '(1)'}), '(size=1)\n', (5048, 5056), False, 'from tiktorch.proto import inference_pb2\n'), ((5058, 5088), 'tiktorch.proto.inference_pb2.NamedInt', 'inference_pb2.NamedInt', ([], {'size': '(2)'}), '(size=2)\n', (5080, 5088), False, 'from tiktorch.proto import inference_pb2\n')]
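The tests above round-trip arrays through a protobuf Tensor that essentially carries a dtype string, a shape, and a raw byte buffer. A numpy-only sketch of that serialization idea, independent of tiktorch's actual message types:
import numpy as np

arr = np.arange(9, dtype=np.uint8).reshape(3, 3)

# the three pieces the pb Tensor carries
dtype, shape, buffer = str(arr.dtype), arr.shape, bytes(arr)

# rebuild the array from the wire representation
restored = np.frombuffer(buffer, dtype=dtype).reshape(shape)
assert np.array_equal(arr, restored)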
# third-party imports
from flask import render_template
# local import
from . import home
@home.route('/')
@home.route('/home')
def homepage():
"""
Render the homepage template on the / and /home route
"""
return render_template('home/index.html', title="Welcome")
@home.route('/about')
def about_page():
"""
Render the about template on the /about route
"""
return render_template('home/about.html', title="About")
| [
"flask.render_template"
] | [((231, 282), 'flask.render_template', 'render_template', (['"""home/index.html"""'], {'title': '"""Welcome"""'}), "('home/index.html', title='Welcome')\n", (246, 282), False, 'from flask import render_template\n'), ((401, 450), 'flask.render_template', 'render_template', (['"""home/about.html"""'], {'title': '"""About"""'}), "('home/about.html', title='About')\n", (416, 450), False, 'from flask import render_template\n')] |
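The home object imported above is presumably a Flask Blueprint defined in the package's __init__. A minimal sketch of how such a blueprint would be declared and registered (the names are assumptions based on the imports, not code from the source package):
from flask import Blueprint, Flask

home = Blueprint('home', __name__)  # what `from . import home` would expose

@home.route('/')
def homepage():
    return 'Welcome'  # the real view renders home/index.html

app = Flask(__name__)
app.register_blueprint(home)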
############ This code is adapted from
###### http://dfm.io/emcee/current/user/line/
###### to demonstrate the MPI capability of EMCEE with line fitting
import matplotlib
matplotlib.use('Agg')
#import corner
import emcee
import mpi4py
from schwimmbad import MPIPool
#from emcee.utils import MPIPool
import sys
import numpy as np
import time
# Choose the "true" parameters.
m_true = -0.9594
b_true = 4.294
f_true = 0.534
# Generate some synthetic data from the model.
N = 50
x = np.sort(10*np.random.rand(N))
yerr = 0.1+0.5*np.random.rand(N)
y = m_true*x+b_true
y += np.abs(f_true*y) * np.random.randn(N)
y += yerr * np.random.randn(N)
A = np.vstack((np.ones_like(x), x)).T
C = np.diag(yerr * yerr)
cov = np.linalg.inv(np.dot(A.T, np.linalg.solve(C, A)))
b_ls, m_ls = np.dot(cov, np.dot(A.T, np.linalg.solve(C, y)))
def lnlike(theta, x, y, yerr):
m, b, lnf = theta
model = m * x + b
inv_sigma2 = 1.0/(yerr**2 + model**2*np.exp(2*lnf))
return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2)))
import scipy.optimize as op
nll = lambda *args: -lnlike(*args)
result = op.minimize(nll, [m_true, b_true, np.log(f_true)], args=(x, y, yerr))
m_ml, b_ml, lnf_ml = result["x"]
def lnprior(theta):
m, b, lnf = theta
if -5.0 < m < 0.5 and 0.0 < b < 10.0 and -10.0 < lnf < 1.0:
return 0.0
return -np.inf
def lnprob(theta, x, y, yerr):
lp = lnprior(theta)
    time.sleep(1.0)  # pause for 1 second; this is only for demonstration, to make sure MPI beats a single node
    # caution: delete this sleep in your own calculation
    # MPI pays off only when the log-likelihood evaluation takes more time than the communication among nodes
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
ndim, nwalkers = 3, 10
pos = [result["x"] + 1e-4*np.random.randn(ndim) for i in range(nwalkers)]
with MPIPool() as pool:
#pool = MPIPool()
if not pool.is_master():
pool.wait()
sys.exit(0)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(x, y, yerr), pool = pool)
#import time
start = time.time()
sampler.run_mcmc(pos, 50)
#pool.close()
end = time.time()
serial_time = end - start
print("2 nodes * 6 tasks * 4 cores with MPI took {0:.1f} seconds".format(serial_time))
#samples = sampler.chain[:, 50:, :].reshape((-1, ndim))
#import matplotlib.pyplot as plt
#plt.figure()
#corner.corner(samples, labels=["$m$", "$b$", "$\ln\,f$"],
# truths=[m_true, b_true, np.log(f_true)])
#plt.savefig('./test.png')
| [
"numpy.abs",
"numpy.ones_like",
"numpy.linalg.solve",
"sys.exit",
"numpy.random.rand",
"matplotlib.use",
"numpy.log",
"time.sleep",
"numpy.diag",
"emcee.EnsembleSampler",
"numpy.exp",
"numpy.isfinite",
"time.time",
"schwimmbad.MPIPool",
"numpy.random.randn"
] | [((172, 193), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (186, 193), False, 'import matplotlib\n'), ((681, 701), 'numpy.diag', 'np.diag', (['(yerr * yerr)'], {}), '(yerr * yerr)\n', (688, 701), True, 'import numpy as np\n'), ((569, 587), 'numpy.abs', 'np.abs', (['(f_true * y)'], {}), '(f_true * y)\n', (575, 587), True, 'import numpy as np\n'), ((588, 606), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (603, 606), True, 'import numpy as np\n'), ((619, 637), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (634, 637), True, 'import numpy as np\n'), ((1407, 1422), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (1417, 1422), False, 'import time\n'), ((2016, 2025), 'schwimmbad.MPIPool', 'MPIPool', ([], {}), '()\n', (2023, 2025), False, 'from schwimmbad import MPIPool\n'), ((2141, 2216), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['nwalkers', 'ndim', 'lnprob'], {'args': '(x, y, yerr)', 'pool': 'pool'}), '(nwalkers, ndim, lnprob, args=(x, y, yerr), pool=pool)\n', (2162, 2216), False, 'import emcee\n'), ((2249, 2260), 'time.time', 'time.time', ([], {}), '()\n', (2258, 2260), False, 'import time\n'), ((2320, 2331), 'time.time', 'time.time', ([], {}), '()\n', (2329, 2331), False, 'import time\n'), ((492, 509), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (506, 509), True, 'import numpy as np\n'), ((526, 543), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (540, 543), True, 'import numpy as np\n'), ((734, 755), 'numpy.linalg.solve', 'np.linalg.solve', (['C', 'A'], {}), '(C, A)\n', (749, 755), True, 'import numpy as np\n'), ((795, 816), 'numpy.linalg.solve', 'np.linalg.solve', (['C', 'y'], {}), '(C, y)\n', (810, 816), True, 'import numpy as np\n'), ((1131, 1145), 'numpy.log', 'np.log', (['f_true'], {}), '(f_true)\n', (1137, 1145), True, 'import numpy as np\n'), ((1830, 1845), 'numpy.isfinite', 'np.isfinite', (['lp'], {}), '(lp)\n', (1841, 1845), True, 'import numpy as np\n'), ((2114, 2125), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2122, 2125), False, 'import sys\n'), ((654, 669), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (666, 669), True, 'import numpy as np\n'), ((1963, 1984), 'numpy.random.randn', 'np.random.randn', (['ndim'], {}), '(ndim)\n', (1978, 1984), True, 'import numpy as np\n'), ((937, 952), 'numpy.exp', 'np.exp', (['(2 * lnf)'], {}), '(2 * lnf)\n', (943, 952), True, 'import numpy as np\n'), ((1002, 1020), 'numpy.log', 'np.log', (['inv_sigma2'], {}), '(inv_sigma2)\n', (1008, 1020), True, 'import numpy as np\n')] |
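The closed-form fit above (cov = inv(A^T C^-1 A), [b, m] = cov A^T C^-1 y) is standard weighted least squares with design matrix A = [1, x]. A small self-contained check of that linear-algebra step on toy data (the numbers below are illustrative):
import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([4.1, 3.2, 2.4, 1.3])   # roughly y = -0.92*x + 4.13
yerr = np.full_like(x, 0.1)

A = np.vstack((np.ones_like(x), x)).T            # design matrix [1, x]
C = np.diag(yerr * yerr)                         # diagonal data covariance
cov = np.linalg.inv(A.T @ np.linalg.solve(C, A))
b_ls, m_ls = cov @ (A.T @ np.linalg.solve(C, y))
print(m_ls, b_ls)  # -> approximately -0.92 and 4.13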
# This file evaluates different models on test dataset
import pandas as pd
import numpy as np
from sklearn import svm
from sklearn.metrics import roc_curve
from sklearn.metrics import confusion_matrix
from matplotlib import pyplot as plt
def roc_drawer(features=None, normalized_mode="thigh_len"):
"""
    Generate ROC-curve inputs for an SVM trained on data normalized in the given mode.
    :param features: features chosen to train the model
    :param normalized_mode: how the data is normalized, e.g. "thigh_len"
    :return: FP rates, TP rates, and the confusion matrix on the test set
"""
if features is None:
features = ['0x', '0y', '1x', '1y', '2x', '2y', '3x', '3y', '5x', '5y', '6x', '6y', '8x', '8y', '9x', '9y',
'11x', '11y', '12x', '12y']
# read set
if normalized_mode == "thigh_len":
ADL_train = pd.read_csv("../data/normalized/ADLNormThigh.csv", index_col=0)
Fall_train = pd.read_csv("../data/normalized/FallNormThigh.csv", index_col=0)
ADL_test = pd.read_csv("../data/normalized/ADLNormThighTest.csv", index_col=0)
Fall_test = pd.read_csv("../data/normalized/FallNormThighTest.csv", index_col=0)
elif normalized_mode == "torso_box":
ADL_train = pd.read_csv("../data/normalized/ADLNormTorso.csv", index_col=0)
Fall_train = pd.read_csv("../data/normalized/FallNormTorso.csv", index_col=0)
ADL_test = pd.read_csv("../data/normalized/ADLNormTorsoTest.csv", index_col=0)
Fall_test = pd.read_csv("../data/normalized/FallNormTorsoTest.csv", index_col=0)
else:
ADL_train = pd.read_csv("../data/normalized/ADLNormNone.csv", index_col=0)
Fall_train = pd.read_csv("../data/normalized/FallNormNone.csv", index_col=0)
ADL_test = pd.read_csv("../data/normalized/ADLNormNoneTest.csv", index_col=0)
Fall_test = pd.read_csv("../data/normalized/FallNormNoneTest.csv", index_col=0)
# generate the training set
ADL_train = ADL_train[features]
ADL_train = np.array(ADL_train).tolist()
false_num = len(ADL_train)
Fall_train = Fall_train[features]
Fall_train = np.array(Fall_train).tolist()
true_num = len(Fall_train)
training_set = ADL_train + Fall_train
training_label = []
for i in range(false_num):
training_label.append(0)
for i in range(true_num):
training_label.append(1)
# generate test set
ADL_test = ADL_test[features]
ADL_test = np.array(ADL_test).tolist()
false_num = len(ADL_test)
Fall_test = Fall_test[features]
Fall_test = np.array(Fall_test).tolist()
true_num = len(Fall_test)
test_set = ADL_test + Fall_test
test_label = []
for i in range(false_num):
test_label.append(0)
for i in range(true_num):
test_label.append(1)
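    # Fit an SVM (default RBF kernel); decision_function returns each sample's
    # signed distance to the separating hyperplane, which roc_curve then sweeps
    # as candidate thresholds to trace the ROC curve.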
svc = svm.SVC()
svc.fit(training_set, training_label)
scores = svc.decision_function(test_set)
fpr, tpr, thresholds = roc_curve(test_label, scores)
label_pred = svc.predict(test_set)
metric = confusion_matrix(test_label, label_pred)
return fpr, tpr, metric
def confusion_metric_drawer(metric_array, plt_name):
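    # sklearn's confusion_matrix puts true labels on rows and predictions on
    # columns, so [1][1] is TP, [0][1] is FP, [1][0] is FN and [0][0] is TN.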
TP = metric_array[1][1]
FP = metric_array[0][1]
FN = metric_array[1][0]
TN = metric_array[0][0]
accuracy = (TP + TN) / (TP + TN + FP + FN)
precision = TP / (TP + FP)
recall = TP / (TP + FN)
fig, ax = plt.subplots(figsize=(3.5, 6))
ax.matshow(metric_array, cmap=plt.cm.Blues, alpha=0.3)
for i in range(metric_array.shape[0]):
for j in range(metric_array.shape[1]):
ax.text(x=j, y=i, s=metric_array[i, j], va='center', ha='center')
plt.xlabel('predicted label')
plt.ylabel('true label')
font = {'color': 'red',
'size': 10,
'family': 'Times New Roman', }
plt.text(-0.5, -1.5, 'Precision: '+str(precision), fontdict=font)
plt.text(-0.5, -1, 'Recall: '+str(recall), fontdict=font)
plt.savefig(plt_name)
plt.show()
plt.cla()
fpr_thigh, tpr_thigh, metric_thigh = roc_drawer(normalized_mode="thigh_len")
fpr_torso, tpr_torso, metric_torso = roc_drawer(normalized_mode="torso_box")
fpr_none, tpr_none, metric_none = roc_drawer(normalized_mode="none")
# draw roc curve
plt.plot(fpr_thigh, tpr_thigh, color='cyan', label='thigh')
plt.plot(fpr_torso, tpr_torso, color='red', label='torso')
plt.plot(fpr_none, tpr_none, color='magenta', label='none')
plt.legend()
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title('ROC curves for fall detection classifier')
plt.xlabel('False Positive Rate (1 - Specificity)')
plt.ylabel('True Positive Rate (Sensitivity)')
plt.grid(True)
plt.savefig("../output/roc_curves.png")
plt.show()
plt.cla()
# draw confusion matrix
confusion_metric_drawer(metric_thigh, "../output/metric_thigh.png")
confusion_metric_drawer(metric_torso, "../output/metric_torso.png")
confusion_metric_drawer(metric_none, "../output/metric_none.png")
| [
"sklearn.svm.SVC",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.ylabel",
"pandas.read_csv",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((4232, 4291), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr_thigh', 'tpr_thigh'], {'color': '"""cyan"""', 'label': '"""thigh"""'}), "(fpr_thigh, tpr_thigh, color='cyan', label='thigh')\n", (4240, 4291), True, 'from matplotlib import pyplot as plt\n'), ((4292, 4350), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr_torso', 'tpr_torso'], {'color': '"""red"""', 'label': '"""torso"""'}), "(fpr_torso, tpr_torso, color='red', label='torso')\n", (4300, 4350), True, 'from matplotlib import pyplot as plt\n'), ((4351, 4410), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr_none', 'tpr_none'], {'color': '"""magenta"""', 'label': '"""none"""'}), "(fpr_none, tpr_none, color='magenta', label='none')\n", (4359, 4410), True, 'from matplotlib import pyplot as plt\n'), ((4411, 4423), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4421, 4423), True, 'from matplotlib import pyplot as plt\n'), ((4424, 4445), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (4432, 4445), True, 'from matplotlib import pyplot as plt\n'), ((4446, 4467), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (4454, 4467), True, 'from matplotlib import pyplot as plt\n'), ((4468, 4521), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC curves for fall detection classifier"""'], {}), "('ROC curves for fall detection classifier')\n", (4477, 4521), True, 'from matplotlib import pyplot as plt\n'), ((4522, 4573), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate (1 - Specificity)"""'], {}), "('False Positive Rate (1 - Specificity)')\n", (4532, 4573), True, 'from matplotlib import pyplot as plt\n'), ((4574, 4620), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate (Sensitivity)"""'], {}), "('True Positive Rate (Sensitivity)')\n", (4584, 4620), True, 'from matplotlib import pyplot as plt\n'), ((4621, 4635), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4629, 4635), True, 'from matplotlib import pyplot as plt\n'), ((4636, 4675), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../output/roc_curves.png"""'], {}), "('../output/roc_curves.png')\n", (4647, 4675), True, 'from matplotlib import pyplot as plt\n'), ((4676, 4686), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4684, 4686), True, 'from matplotlib import pyplot as plt\n'), ((4687, 4696), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (4694, 4696), True, 'from matplotlib import pyplot as plt\n'), ((2819, 2828), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (2826, 2828), False, 'from sklearn import svm\n'), ((2943, 2972), 'sklearn.metrics.roc_curve', 'roc_curve', (['test_label', 'scores'], {}), '(test_label, scores)\n', (2952, 2972), False, 'from sklearn.metrics import roc_curve\n'), ((3026, 3066), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_label', 'label_pred'], {}), '(test_label, label_pred)\n', (3042, 3066), False, 'from sklearn.metrics import confusion_matrix\n'), ((3385, 3415), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3.5, 6)'}), '(figsize=(3.5, 6))\n', (3397, 3415), True, 'from matplotlib import pyplot as plt\n'), ((3647, 3676), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""predicted label"""'], {}), "('predicted label')\n", (3657, 3676), True, 'from matplotlib import pyplot as plt\n'), ((3681, 3705), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""true label"""'], {}), "('true label')\n", (3691, 3705), True, 'from matplotlib import pyplot as plt\n'), ((3938, 3959), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plt_name'], {}), '(plt_name)\n', (3949, 3959), True, 'from matplotlib import pyplot as plt\n'), ((3964, 3974), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3972, 3974), True, 'from matplotlib import pyplot as plt\n'), ((3979, 3988), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3986, 3988), True, 'from matplotlib import pyplot as plt\n'), ((865, 928), 'pandas.read_csv', 'pd.read_csv', (['"""../data/normalized/ADLNormThigh.csv"""'], {'index_col': '(0)'}), "('../data/normalized/ADLNormThigh.csv', index_col=0)\n", (876, 928), True, 'import pandas as pd\n'), ((950, 1014), 'pandas.read_csv', 'pd.read_csv', (['"""../data/normalized/FallNormThigh.csv"""'], {'index_col': '(0)'}), "('../data/normalized/FallNormThigh.csv', index_col=0)\n", (961, 1014), True, 'import pandas as pd\n'), ((1034, 1101), 'pandas.read_csv', 'pd.read_csv', (['"""../data/normalized/ADLNormThighTest.csv"""'], {'index_col': '(0)'}), "('../data/normalized/ADLNormThighTest.csv', index_col=0)\n", (1045, 1101), True, 'import pandas as pd\n'), ((1122, 1190), 'pandas.read_csv', 'pd.read_csv', (['"""../data/normalized/FallNormThighTest.csv"""'], {'index_col': '(0)'}), "('../data/normalized/FallNormThighTest.csv', index_col=0)\n", (1133, 1190), True, 'import pandas as pd\n'), ((1252, 1315), 'pandas.read_csv', 'pd.read_csv', (['"""../data/normalized/ADLNormTorso.csv"""'], {'index_col': '(0)'}), "('../data/normalized/ADLNormTorso.csv', index_col=0)\n", (1263, 1315), True, 'import pandas as pd\n'), ((1337, 1401), 'pandas.read_csv', 'pd.read_csv', (['"""../data/normalized/FallNormTorso.csv"""'], {'index_col': '(0)'}), "('../data/normalized/FallNormTorso.csv', index_col=0)\n", (1348, 1401), True, 'import pandas as pd\n'), ((1421, 1488), 'pandas.read_csv', 'pd.read_csv', (['"""../data/normalized/ADLNormTorsoTest.csv"""'], {'index_col': '(0)'}), "('../data/normalized/ADLNormTorsoTest.csv', index_col=0)\n", (1432, 1488), True, 'import pandas as pd\n'), ((1509, 1577), 'pandas.read_csv', 'pd.read_csv', (['"""../data/normalized/FallNormTorsoTest.csv"""'], {'index_col': '(0)'}), "('../data/normalized/FallNormTorsoTest.csv', index_col=0)\n", (1520, 1577), True, 'import pandas as pd\n'), ((1608, 1670), 'pandas.read_csv', 'pd.read_csv', (['"""../data/normalized/ADLNormNone.csv"""'], {'index_col': '(0)'}), "('../data/normalized/ADLNormNone.csv', index_col=0)\n", (1619, 1670), True, 'import pandas as pd\n'), ((1692, 1755), 'pandas.read_csv', 'pd.read_csv', (['"""../data/normalized/FallNormNone.csv"""'], {'index_col': '(0)'}), "('../data/normalized/FallNormNone.csv', index_col=0)\n", (1703, 1755), True, 'import pandas as pd\n'), ((1775, 1841), 'pandas.read_csv', 'pd.read_csv', (['"""../data/normalized/ADLNormNoneTest.csv"""'], {'index_col': '(0)'}), "('../data/normalized/ADLNormNoneTest.csv', index_col=0)\n", (1786, 1841), True, 'import pandas as pd\n'), ((1862, 1929), 'pandas.read_csv', 'pd.read_csv', (['"""../data/normalized/FallNormNoneTest.csv"""'], {'index_col': '(0)'}), "('../data/normalized/FallNormNoneTest.csv', index_col=0)\n", (1873, 1929), True, 'import pandas as pd\n'), ((2015, 2034), 'numpy.array', 'np.array', (['ADL_train'], {}), '(ADL_train)\n', (2023, 2034), True, 'import numpy as np\n'), ((2131, 2151), 'numpy.array', 'np.array', (['Fall_train'], {}), '(Fall_train)\n', (2139, 2151), True, 'import numpy as np\n'), ((2461, 2479), 'numpy.array', 'np.array', (['ADL_test'], {}), '(ADL_test)\n', (2469, 2479), True, 'import numpy as np\n'), ((2572, 2591), 'numpy.array', 'np.array', (['Fall_test'], {}), '(Fall_test)\n', (2580, 2591), True, 'import numpy as np\n')]
"""
Copyright MIT and Harvey Mudd College
MIT License
Summer 2020
Final Challenge - Time Trials
"""
########################################################################################
# Imports
########################################################################################
import sys
import cv2 as cv
import numpy as np
from enum import IntEnum
sys.path.insert(0, "../../library")
import racecar_core
import racecar_utils as rc_utils
########################################################################################
# Colors
########################################################################################
BLUE = ((90, 100, 100), (110, 255, 255)) # The HSV range for the color blue
GREEN = ((40, 50, 50), (80, 255, 255)) # The HSV range for the color green
RED = ((170, 50, 50), (10, 255, 255))  # The HSV range for the color red (hue wraps past 180)
PURPLE = ((135, 100, 100), (150, 255, 255)) # The HSV range for the color purple
ORANGE = ((10, 50, 50), (20, 255, 255)) # The HSV range for the color orange
########################################################################################
# Global variables
########################################################################################
rc = racecar_core.create_racecar()
COLOR_PRIORITY = [BLUE, RED, GREEN, ORANGE, PURPLE] # Starting color priority
USED_COLORS = (RED, GREEN, BLUE, ORANGE, PURPLE)
MIN_CONTOUR_AREA = 200
CROP_FLOOR = ((465, 0), (rc.camera.get_height(), rc.camera.get_width())) # Crop everything but the floor
CROP_AR_LEFT = ((40, 0), ((2 * rc.camera.get_height()) // 3, rc.camera.get_width() // 2))
CROP_AR_RIGHT = ((40, rc.camera.get_width() // 2), ((2 * rc.camera.get_height()) // 3, rc.camera.get_width()))
CROP_LEFT_FLOOR = ((360, 0), (rc.camera.get_height(), rc.camera.get_width() // 2))
CROP_RIGHT_FLOOR = ((360, rc.camera.get_width() // 2), (rc.camera.get_height(), rc.camera.get_width()))
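# Each crop region is ((top-left row, top-left col), (bottom-right row, bottom-right col)) in pixels.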
COLOR = ""
COLOR_TURN = ""
DESIRED_DISTANCE = 35
CONE_DISTANCE = 70
TURN_DISTANCE = 110
ACCEL_DISTANCE = 120
LEFT_COL = int(rc.camera.get_width() * 0.3)
RIGHT_COL = int(rc.camera.get_width() * 0.7)
BOTTOM_ROW = int(rc.camera.get_height() * 0.65)
speed: float = 0.0
angle: float = 0.0
contour_center = None
contour_area = 0
contour_area_ar = 0
corners = 0
counter = 0
center = 0
x = False
# Speeds
MAX_ALIGN_SPEED = 0.8
MIN_ALIGN_SPEED = 0.4
PASS_SPEED = 0.5
FIND_SPEED = 0.2
REVERSE_SPEED = -0.2
NO_CONES_SPEED = 0.4
# Times
REVERSE_BRAKE_TIME = 0.25
SHORT_PASS_TIME = 1.0
LONG_PASS_TIME = 1.2
# Cone finding parameters
MIN_CONTOUR_AREA = 100  # overrides the value of 200 defined above; 100 is the effective value
MAX_DISTANCE = 250
REVERSE_DISTANCE = 50
STOP_REVERSE_DISTANCE = 60
CLOSE_DISTANCE = 30
FAR_DISTANCE = 120
# >> Variables
counter = 0
red_center = None
red_distance = 0
prev_red_distance = 0
blue_center = None
blue_distance = 0
prev_blue_distance = 0
class State(IntEnum):
line_follow = 0
line_center_fast = 1
line_center_split = 2
line_center_turn = 3
slalom = 4
wall_follow_accel = 5
wall_follow_turn = 6
wall_follow_pass_left = 7
wall_follow_pass_right = 8
finish_sprint = 9
cur_state: State = State.line_follow
class Direction(IntEnum):
"""
AR marker direction
"""
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
class Mode(IntEnum):
cone_align = 0
cone_pass = 1
cone_blue = 2
cone_red = 3
reverse = 4
no_cones = 5
cur_mode = Mode.cone_align
########################################################################################
# Functions
########################################################################################
def update_contour(image, min_contour_area):
global contour_center
global contour_area
global COLOR_TURN
if image is None:
contour_center = None
contour_area = 0
else:
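        # Scan colors in priority order and lock onto the first sufficiently
        # large contour; COLOR_TURN remembers which color matched.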
for color in COLOR_PRIORITY:
contours = rc_utils.find_contours(image, color[0], color[1])
contour = rc_utils.get_largest_contour(contours, min_contour_area)
if contour is not None:
COLOR_TURN = color
contour_center = rc_utils.get_contour_center(contour)
contour_area = rc_utils.get_contour_area(contour)
rc_utils.draw_contour(image, contour)
rc_utils.draw_circle(image, contour_center)
break
else:
contour_center = None
contour_area = 0
def update_contour_ar(image, min_contour_area):
global contour_area_ar
global COLOR
if image is None:
contour_area_ar = 0
else:
for color in COLOR_PRIORITY:
contours_ar = rc_utils.find_contours(image, color[0], color[1])
contour_ar = rc_utils.get_largest_contour(contours_ar, min_contour_area)
if contour_ar is not None:
COLOR = color
contour_area_ar = rc_utils.get_contour_area(contour_ar)
rc_utils.draw_contour(image, contour_ar)
break
else:
contour_area_ar = 0
def update_ar(image):
global corners
global center
ar_directions = []
corners, ids = rc_utils.get_ar_markers(image)
image = rc_utils.draw_ar_markers(image, corners, ids, (0, 255, 0))
for i in range(len(corners)):
ar_directions.append(rc_utils.get_ar_direction(corners[i]))
    if len(ar_directions) != 0:
center = (int((corners[0][0][0][0] + corners[0][0][3][0]) // 2), int((corners[0][0][0][1] + corners[0][0][1][1]) // 2))
return ar_directions
def start():
"""
This function is run once every time the start button is pressed
"""
global speed
global angle
global cur_state
global counter
global cur_mode
cur_state = State.slalom
counter = 0
cur_mode = Mode.cone_align
# Have the car begin at a stop
rc.drive.stop()
color_image = rc.camera.get_color_image()
ar_image = color_image
depth_image = rc.camera.get_depth_image()
scan = rc.lidar.get_samples()
# Print start message
print(">> Final Challenge - Time Trials")
def update():
"""
After start() is run, this function is run every frame until the back button
is pressed
"""
global speed
global angle
global cur_state
global counter
global corners
global center
global x
global cur_mode
color_image = rc.camera.get_color_image()
ar_image = color_image
depth_image = rc.camera.get_depth_image()
scan = rc.lidar.get_samples()
forward_dist = rc_utils.get_lidar_average_distance(scan, 0, 5)
back_dist = rc_utils.get_lidar_average_distance(scan, 180, 5)
top_right_dist = rc_utils.get_lidar_average_distance(scan, 40, 20)
bot_right_dist = rc_utils.get_lidar_average_distance(scan, 140, 20)
top_left_dist = rc_utils.get_lidar_average_distance(scan, 300, 20)
bot_left_dist = rc_utils.get_lidar_average_distance(scan, 240, 20)
right_dist = rc_utils.get_lidar_average_distance(scan, 90, 10)
left_dist = rc_utils.get_lidar_average_distance(scan, 270, 10)
ar_directions = update_ar(ar_image)
floor = np.copy(color_image)
floor = rc_utils.crop(floor, CROP_FLOOR[0], CROP_FLOOR[1])
left_floor = np.copy(color_image)
right_floor = np.copy(color_image)
left_floor = rc_utils.crop(left_floor, CROP_LEFT_FLOOR[0], CROP_LEFT_FLOOR[1])
right_floor = rc_utils.crop(right_floor, CROP_RIGHT_FLOOR[0], CROP_RIGHT_FLOOR[1])
cones = np.copy(color_image)
rc.display.show_color_image(cones)
cone_depth = np.copy(depth_image)
cone_depth = rc_utils.crop(cone_depth, (0, LEFT_COL), (BOTTOM_ROW, RIGHT_COL))
cone_closest_point = rc_utils.get_closest_pixel(cone_depth)
cone_distance = rc_utils.get_pixel_average_distance(cone_depth, cone_closest_point)
ar_image_left = 0
ar_image_right = 0
counter += rc.get_delta_time()
if cur_state == State.line_follow:
        if len(ar_directions) != 0:
for direction in ar_directions:
left_color = COLOR_PRIORITY[0]
right_color = COLOR_PRIORITY[4]
if direction == Direction.LEFT:
ar_image_left = np.copy(ar_image)
ar_image_left = rc_utils.crop(ar_image_left, CROP_AR_LEFT[0], CROP_AR_LEFT[1])
update_contour_ar(ar_image_left, 500)
left_color: tuple = COLOR
elif direction == Direction.RIGHT:
ar_image_right = np.copy(ar_image)
ar_image_right = rc_utils.crop(ar_image_right, CROP_AR_RIGHT[0], CROP_AR_RIGHT[1])
update_contour_ar(ar_image_right, 500)
right_color: tuple = COLOR
elif direction == Direction.UP:
line_center_image = np.copy(ar_image)
line_center_image = rc_utils.crop(line_center_image, CROP_AR_RIGHT[0], CROP_AR_RIGHT[1])
update_contour_ar(line_center_image, 500)
x = True
COLOR_PRIORITY.clear()
COLOR_PRIORITY.append(left_color)
for color in USED_COLORS:
if color != left_color or color != right_color:
COLOR_PRIORITY.append(color)
COLOR_PRIORITY.append(right_color)
update_contour(floor, MIN_CONTOUR_AREA)
if contour_center is not None:
angle = rc_utils.remap_range(contour_center[1], 0, (3 * rc.camera.get_width()) // 4, -1, 1)
angle = rc_utils.clamp(angle, -1, 1)
else:
angle = 0
        if counter >= 13 and 0 < top_left_dist < 200 and x:
counter = 0
cur_state = State.line_center_fast
print("State changed")
if abs(angle) < 0.25:
speed = 1
else:
speed = 0.75
elif cur_state == State.line_center_fast:
speed = 0.75
angle = 0
line_center_color = COLOR
COLOR_PRIORITY.clear()
COLOR_PRIORITY.append(line_center_color)
if line_center_color == ORANGE:
COLOR_PRIORITY.append(PURPLE)
else:
COLOR_PRIORITY.append(ORANGE)
update_contour(left_floor, 1)
contour_area_left = contour_area
update_contour(right_floor, 1)
contour_area_right = contour_area
if contour_area_right != 0 and contour_area_left != 0:
angle = rc_utils.remap_range(contour_area_left - contour_area_right, -2000, 2000, -1, 1)
elif contour_area_left != 0 and counter >= 3:
angle = 0.05
elif contour_area_right != 0 and counter >= 3:
angle = -0.05
else:
angle = 0
        if len(ar_directions) != 0 and 0 < forward_dist < 140 and counter >= 3:
cur_state = State.line_center_split
elif COLOR_TURN == COLOR_PRIORITY[1] and counter >= 10:
            if len(ar_directions) != 0:
if 0 < left_dist < 200:
counter = 0
x = False
cur_state = State.slalom
else:
speed = 1
angle = 0.5
else:
cur_state = State.line_center_turn
if 0.15 < abs(angle) < 0.5:
speed = 0.75
elif abs(angle) > 0.5:
speed = 0.5
else:
speed = 1
elif cur_state == State.line_center_split:
        if len(ar_directions) != 0:
for direction in ar_directions:
if direction == Direction.LEFT:
angle = -0.25
elif direction == Direction.RIGHT:
angle = 0.25
elif counter >= 1.125:
cur_state = State.line_center_fast
print("Changed State")
elif cur_state == State.line_center_turn:
update_contour(left_floor, 1)
contour_area_left = contour_area
update_contour(right_floor, 1)
contour_area_right = contour_area
if contour_area_left != 0 and contour_area_right != 0:
speed = 0.5
angle = rc_utils.remap_range(contour_area_left - contour_area_right, -1500, 1500, -1, 1)
if COLOR_TURN == COLOR_PRIORITY[0] and contour_area_right > 200 and contour_area_left > 200:
cur_state = State.line_center_fast
else:
speed = 0.75
angle = -1
print("Left" + str(contour_area_left))
print("Right" + str(contour_area_right))
elif cur_state == State.slalom:
speed = 1
angle = -0.087
if abs(rc.physics.get_linear_acceleration()[2]) > 1:
angle = 0
        if len(ar_directions) != 0:
            x = True
        if 0 < left_dist < 100 and 0 < right_dist < 100 and x:
counter = 0
cur_state = State.wall_follow_accel
elif cur_state == State.wall_follow_accel:
speed = 0.5
right_dif = top_right_dist - bot_right_dist
left_dif = top_left_dist - bot_left_dist
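        # Proportional wall following: the front/back lidar difference along the
        # nearer wall approximates the car's angle to it, and remap_range turns
        # that error into a steering command in [-1, 1].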
if right_dist > left_dist:
angle = rc_utils.remap_range(left_dif, -DESIRED_DISTANCE, DESIRED_DISTANCE, 1, -1)
else:
angle = rc_utils.remap_range(right_dif, -DESIRED_DISTANCE, DESIRED_DISTANCE, -1, 1)
if abs(angle) < 0.06:
speed = 1
elif 0.06 <= abs(angle) <= 0.15:
speed = 0.75
        if len(ar_directions) != 0:
if forward_dist <= 120:
angle = 0
for direction in ar_directions:
print(forward_dist)
if forward_dist <= 90:
if direction == Direction.LEFT:
counter = 0
cur_state = State.wall_follow_pass_left
elif direction == Direction.RIGHT:
counter = 0
cur_state = State.wall_follow_pass_right
else:
print(direction)
elif 8 <= counter and forward_dist <= TURN_DISTANCE:
cur_state = State.wall_follow_turn
if counter >= 23 and right_dist < 30:
cur_state = State.finish_sprint
elif forward_dist == 0:
cur_state = State.finish_sprint
elif cur_state == State.wall_follow_turn:
speed = 1
top_left = rc_utils.get_lidar_average_distance(scan, 315, 10)
top_right = rc_utils.get_lidar_average_distance(scan, 45, 10)
angle = rc_utils.remap_range(top_right - top_left, -20, 20, -1, 1)
if forward_dist > ACCEL_DISTANCE:
cur_state = State.wall_follow_accel
elif forward_dist <= 20:
speed = -speed
elif cur_state == State.wall_follow_pass_left:
angle = -0.55
if counter >= 1:
cur_state = State.wall_follow_accel
elif cur_state == State.wall_follow_pass_right:
angle = 0.65
if counter >= 1:
cur_state = State.wall_follow_accel
elif cur_state == State.finish_sprint:
speed = 1
angle = -0.02
rc.drive.set_max_speed(0.5)
if -0.05 < angle < 0.05:
angle = 0
angle = rc_utils.clamp(angle, -1, 1)
speed = rc_utils.clamp(speed, -1, 1)
rc.drive.set_speed_angle(speed, angle)
print(
f"State: {cur_state.name}, speed: {speed:.2f}, angle: {angle:2f}"
)
########################################################################################
# DO NOT MODIFY: Register start and update and begin execution
########################################################################################
if __name__ == "__main__":
rc.set_start_update(start, update, None)
rc.go() | [
"numpy.copy",
"racecar_utils.draw_contour",
"sys.path.insert",
"racecar_utils.get_ar_markers",
"racecar_utils.remap_range",
"racecar_utils.get_contour_area",
"racecar_utils.get_lidar_average_distance",
"racecar_utils.clamp",
"racecar_utils.draw_circle",
"racecar_utils.get_closest_pixel",
"racecar_core.create_racecar",
"racecar_utils.get_pixel_average_distance",
"racecar_utils.crop",
"racecar_utils.get_contour_center",
"racecar_utils.draw_ar_markers",
"racecar_utils.get_ar_direction",
"racecar_utils.find_contours",
"racecar_utils.get_largest_contour"
] | [((363, 398), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../library"""'], {}), "(0, '../../library')\n", (378, 398), False, 'import sys\n'), ((1227, 1256), 'racecar_core.create_racecar', 'racecar_core.create_racecar', ([], {}), '()\n', (1254, 1256), False, 'import racecar_core\n'), ((5136, 5166), 'racecar_utils.get_ar_markers', 'rc_utils.get_ar_markers', (['image'], {}), '(image)\n', (5159, 5166), True, 'import racecar_utils as rc_utils\n'), ((5179, 5237), 'racecar_utils.draw_ar_markers', 'rc_utils.draw_ar_markers', (['image', 'corners', 'ids', '(0, 255, 0)'], {}), '(image, corners, ids, (0, 255, 0))\n', (5203, 5237), True, 'import racecar_utils as rc_utils\n'), ((6533, 6580), 'racecar_utils.get_lidar_average_distance', 'rc_utils.get_lidar_average_distance', (['scan', '(0)', '(5)'], {}), '(scan, 0, 5)\n', (6568, 6580), True, 'import racecar_utils as rc_utils\n'), ((6597, 6646), 'racecar_utils.get_lidar_average_distance', 'rc_utils.get_lidar_average_distance', (['scan', '(180)', '(5)'], {}), '(scan, 180, 5)\n', (6632, 6646), True, 'import racecar_utils as rc_utils\n'), ((6668, 6717), 'racecar_utils.get_lidar_average_distance', 'rc_utils.get_lidar_average_distance', (['scan', '(40)', '(20)'], {}), '(scan, 40, 20)\n', (6703, 6717), True, 'import racecar_utils as rc_utils\n'), ((6739, 6789), 'racecar_utils.get_lidar_average_distance', 'rc_utils.get_lidar_average_distance', (['scan', '(140)', '(20)'], {}), '(scan, 140, 20)\n', (6774, 6789), True, 'import racecar_utils as rc_utils\n'), ((6810, 6860), 'racecar_utils.get_lidar_average_distance', 'rc_utils.get_lidar_average_distance', (['scan', '(300)', '(20)'], {}), '(scan, 300, 20)\n', (6845, 6860), True, 'import racecar_utils as rc_utils\n'), ((6881, 6931), 'racecar_utils.get_lidar_average_distance', 'rc_utils.get_lidar_average_distance', (['scan', '(240)', '(20)'], {}), '(scan, 240, 20)\n', (6916, 6931), True, 'import racecar_utils as rc_utils\n'), ((6949, 6998), 'racecar_utils.get_lidar_average_distance', 'rc_utils.get_lidar_average_distance', (['scan', '(90)', '(10)'], {}), '(scan, 90, 10)\n', (6984, 6998), True, 'import racecar_utils as rc_utils\n'), ((7015, 7065), 'racecar_utils.get_lidar_average_distance', 'rc_utils.get_lidar_average_distance', (['scan', '(270)', '(10)'], {}), '(scan, 270, 10)\n', (7050, 7065), True, 'import racecar_utils as rc_utils\n'), ((7119, 7139), 'numpy.copy', 'np.copy', (['color_image'], {}), '(color_image)\n', (7126, 7139), True, 'import numpy as np\n'), ((7152, 7202), 'racecar_utils.crop', 'rc_utils.crop', (['floor', 'CROP_FLOOR[0]', 'CROP_FLOOR[1]'], {}), '(floor, CROP_FLOOR[0], CROP_FLOOR[1])\n', (7165, 7202), True, 'import racecar_utils as rc_utils\n'), ((7221, 7241), 'numpy.copy', 'np.copy', (['color_image'], {}), '(color_image)\n', (7228, 7241), True, 'import numpy as np\n'), ((7260, 7280), 'numpy.copy', 'np.copy', (['color_image'], {}), '(color_image)\n', (7267, 7280), True, 'import numpy as np\n'), ((7298, 7363), 'racecar_utils.crop', 'rc_utils.crop', (['left_floor', 'CROP_LEFT_FLOOR[0]', 'CROP_LEFT_FLOOR[1]'], {}), '(left_floor, CROP_LEFT_FLOOR[0], CROP_LEFT_FLOOR[1])\n', (7311, 7363), True, 'import racecar_utils as rc_utils\n'), ((7382, 7450), 'racecar_utils.crop', 'rc_utils.crop', (['right_floor', 'CROP_RIGHT_FLOOR[0]', 'CROP_RIGHT_FLOOR[1]'], {}), '(right_floor, CROP_RIGHT_FLOOR[0], CROP_RIGHT_FLOOR[1])\n', (7395, 7450), True, 'import racecar_utils as rc_utils\n'), ((7464, 7484), 'numpy.copy', 'np.copy', (['color_image'], {}), '(color_image)\n', (7471, 7484), True, 'import numpy as np\n'), ((7541, 7561), 'numpy.copy', 'np.copy', (['depth_image'], {}), '(depth_image)\n', (7548, 7561), True, 'import numpy as np\n'), ((7579, 7644), 'racecar_utils.crop', 'rc_utils.crop', (['cone_depth', '(0, LEFT_COL)', '(BOTTOM_ROW, RIGHT_COL)'], {}), '(cone_depth, (0, LEFT_COL), (BOTTOM_ROW, RIGHT_COL))\n', (7592, 7644), True, 'import racecar_utils as rc_utils\n'), ((7670, 7708), 'racecar_utils.get_closest_pixel', 'rc_utils.get_closest_pixel', (['cone_depth'], {}), '(cone_depth)\n', (7696, 7708), True, 'import racecar_utils as rc_utils\n'), ((7729, 7796), 'racecar_utils.get_pixel_average_distance', 'rc_utils.get_pixel_average_distance', (['cone_depth', 'cone_closest_point'], {}), '(cone_depth, cone_closest_point)\n', (7764, 7796), True, 'import racecar_utils as rc_utils\n'), ((15231, 15259), 'racecar_utils.clamp', 'rc_utils.clamp', (['angle', '(-1)', '(1)'], {}), '(angle, -1, 1)\n', (15245, 15259), True, 'import racecar_utils as rc_utils\n'), ((15272, 15300), 'racecar_utils.clamp', 'rc_utils.clamp', (['speed', '(-1)', '(1)'], {}), '(speed, -1, 1)\n', (15286, 15300), True, 'import racecar_utils as rc_utils\n'), ((3839, 3888), 'racecar_utils.find_contours', 'rc_utils.find_contours', (['image', 'color[0]', 'color[1]'], {}), '(image, color[0], color[1])\n', (3861, 3888), True, 'import racecar_utils as rc_utils\n'), ((3911, 3967), 'racecar_utils.get_largest_contour', 'rc_utils.get_largest_contour', (['contours', 'min_contour_area'], {}), '(contours, min_contour_area)\n', (3939, 3967), True, 'import racecar_utils as rc_utils\n'), ((4618, 4667), 'racecar_utils.find_contours', 'rc_utils.find_contours', (['image', 'color[0]', 'color[1]'], {}), '(image, color[0], color[1])\n', (4640, 4667), True, 'import racecar_utils as rc_utils\n'), ((4693, 4752), 'racecar_utils.get_largest_contour', 'rc_utils.get_largest_contour', (['contours_ar', 'min_contour_area'], {}), '(contours_ar, min_contour_area)\n', (4721, 4752), True, 'import racecar_utils as rc_utils\n'), ((5301, 5338), 'racecar_utils.get_ar_direction', 'rc_utils.get_ar_direction', (['corners[i]'], {}), '(corners[i])\n', (5326, 5338), True, 'import racecar_utils as rc_utils\n'), ((9512, 9540), 'racecar_utils.clamp', 'rc_utils.clamp', (['angle', '(-1)', '(1)'], {}), '(angle, -1, 1)\n', (9526, 9540), True, 'import racecar_utils as rc_utils\n'), ((4072, 4108), 'racecar_utils.get_contour_center', 'rc_utils.get_contour_center', (['contour'], {}), '(contour)\n', (4099, 4108), True, 'import racecar_utils as rc_utils\n'), ((4140, 4174), 'racecar_utils.get_contour_area', 'rc_utils.get_contour_area', (['contour'], {}), '(contour)\n', (4165, 4174), True, 'import racecar_utils as rc_utils\n'), ((4192, 4229), 'racecar_utils.draw_contour', 'rc_utils.draw_contour', (['image', 'contour'], {}), '(image, contour)\n', (4213, 4229), True, 'import racecar_utils as rc_utils\n'), ((4246, 4289), 'racecar_utils.draw_circle', 'rc_utils.draw_circle', (['image', 'contour_center'], {}), '(image, contour_center)\n', (4266, 4289), True, 'import racecar_utils as rc_utils\n'), ((4856, 4893), 'racecar_utils.get_contour_area', 'rc_utils.get_contour_area', (['contour_ar'], {}), '(contour_ar)\n', (4881, 4893), True, 'import racecar_utils as rc_utils\n'), ((4911, 4951), 'racecar_utils.draw_contour', 'rc_utils.draw_contour', (['image', 'contour_ar'], {}), '(image, contour_ar)\n', (4932, 4951), True, 'import racecar_utils as rc_utils\n'), ((10434, 10519), 'racecar_utils.remap_range', 'rc_utils.remap_range', (['(contour_area_left - contour_area_right)', '(-2000)', '(2000)', '(-1)', '(1)'], {}), '(contour_area_left - contour_area_right, -2000, 2000, -1, 1\n )\n', (10454, 10519), True, 'import racecar_utils as rc_utils\n'), ((8180, 8197), 'numpy.copy', 'np.copy', (['ar_image'], {}), '(ar_image)\n', (8187, 8197), True, 'import numpy as np\n'), ((8234, 8296), 'racecar_utils.crop', 'rc_utils.crop', (['ar_image_left', 'CROP_AR_LEFT[0]', 'CROP_AR_LEFT[1]'], {}), '(ar_image_left, CROP_AR_LEFT[0], CROP_AR_LEFT[1])\n', (8247, 8296), True, 'import racecar_utils as rc_utils\n'), ((8489, 8506), 'numpy.copy', 'np.copy', (['ar_image'], {}), '(ar_image)\n', (8496, 8506), True, 'import numpy as np\n'), ((8544, 8609), 'racecar_utils.crop', 'rc_utils.crop', (['ar_image_right', 'CROP_AR_RIGHT[0]', 'CROP_AR_RIGHT[1]'], {}), '(ar_image_right, CROP_AR_RIGHT[0], CROP_AR_RIGHT[1])\n', (8557, 8609), True, 'import racecar_utils as rc_utils\n'), ((12149, 12234), 'racecar_utils.remap_range', 'rc_utils.remap_range', (['(contour_area_left - contour_area_right)', '(-1500)', '(1500)', '(-1)', '(1)'], {}), '(contour_area_left - contour_area_right, -1500, 1500, -1, 1\n )\n', (12169, 12234), True, 'import racecar_utils as rc_utils\n'), ((8804, 8821), 'numpy.copy', 'np.copy', (['ar_image'], {}), '(ar_image)\n', (8811, 8821), True, 'import numpy as np\n'), ((8862, 8930), 'racecar_utils.crop', 'rc_utils.crop', (['line_center_image', 'CROP_AR_RIGHT[0]', 'CROP_AR_RIGHT[1]'], {}), '(line_center_image, CROP_AR_RIGHT[0], CROP_AR_RIGHT[1])\n', (8875, 8930), True, 'import racecar_utils as rc_utils\n'), ((13138, 13212), 'racecar_utils.remap_range', 'rc_utils.remap_range', (['left_dif', '(-DESIRED_DISTANCE)', 'DESIRED_DISTANCE', '(1)', '(-1)'], {}), '(left_dif, -DESIRED_DISTANCE, DESIRED_DISTANCE, 1, -1)\n', (13158, 13212), True, 'import racecar_utils as rc_utils\n'), ((13247, 13322), 'racecar_utils.remap_range', 'rc_utils.remap_range', (['right_dif', '(-DESIRED_DISTANCE)', 'DESIRED_DISTANCE', '(-1)', '(1)'], {}), '(right_dif, -DESIRED_DISTANCE, DESIRED_DISTANCE, -1, 1)\n', (13267, 13322), True, 'import racecar_utils as rc_utils\n'), ((14399, 14449), 'racecar_utils.get_lidar_average_distance', 'rc_utils.get_lidar_average_distance', (['scan', '(315)', '(10)'], {}), '(scan, 315, 10)\n', (14434, 14449), True, 'import racecar_utils as rc_utils\n'), ((14470, 14519), 'racecar_utils.get_lidar_average_distance', 'rc_utils.get_lidar_average_distance', (['scan', '(45)', '(10)'], {}), '(scan, 45, 10)\n', (14505, 14519), True, 'import racecar_utils as rc_utils\n'), ((14536, 14594), 'racecar_utils.remap_range', 'rc_utils.remap_range', (['(top_right - top_left)', '(-20)', '(20)', '(-1)', '(1)'], {}), '(top_right - top_left, -20, 20, -1, 1)\n', (14556, 14594), True, 'import racecar_utils as rc_utils\n')]
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QWidget, QMessageBox
from random import choice
from Bullet.Bullets import Bullet
from Constants import *
from Enemy.Enemy import Enemy
from Enemy.move_enemy import MoveEnemy
from GameUpdate import GameUpdateThread, BulletUpdateThread, EnemyBulletUpdateThread
from Level.Level import Level
from Life.Life import Life
from Player.Player import Player
from Player.move_player import MovePlayer
from Score.Score import Score
from Shield.Shield import Shield
from DeusExMachina.DeusExMachine import DeusExMachine, DeusThread
class Game(QWidget):
closeGame = pyqtSignal()
def __init__(self, players_count, parent=None):
QWidget.__init__(self, parent=parent)
self.setWindowTitle("Space invaders")
self.players_count = players_count
self.keys_pressed = set()
self.game_update_thread = GameUpdateThread()
self.game_update_thread.game_update_signal.connect(self.game_update)
self.bullet_game_update_thread = BulletUpdateThread()
self.bullet_game_update_thread.bullet_update_signal.connect(self.bullet_game_update)
self.enemy_bullet_game_update_thread = EnemyBulletUpdateThread()
self.enemy_bullet_game_update_thread.enemy_bullet_update_signal.connect(self.enemy_bullet_game_update)
self.deus_thread = DeusThread()
self.deus_thread.deus_signal.connect(self.init_deus)
self.move_enemy = MoveEnemy()
self.move_enemy.move_signal.connect(self.enemy_game_update)
self.move_player = MovePlayer()
self.move_player.key_pressed_signal.connect(self.player_move_update)
self.random_number = -1
self.hard_quit = False
self.__init__ui()
self.start_threads()
def __init__ui(self):
self.resize(SCREEN_WIDTH, SCREEN_HEIGHT)
self.setStyleSheet("background-color: black;")
self.deus_machine = {}
self.shields = []
self.enemies = []
self.lives = []
self.players = []
self.player_bullets = []
self.enemy_bullets = {}
self.scores = []
self.level = Level(self)
for i in range(self.players_count):
self.players.append(Player(self, i + 1, self.players_count))
self.scores.append(Score(self, i + 1))
self.player_bullets.append([])
self.lives.append([])
self.start_game()
def start_threads(self) -> None:
self.move_enemy.start()
self.move_player.start()
self.game_update_thread.start()
self.bullet_game_update_thread.start()
self.enemy_bullet_game_update_thread.start()
self.deus_thread.start()
def start_game(self) -> None:
for i in range(4):
self.shields.append(Shield(i, self))
for j in range(5):
for i in range(11):
self.enemies.append(Enemy(i, j, self))
for i in range(self.players_count):
for j in range(3):
self.lives[i].append(Life(j, self, i + 1))
self.level.print_level()
def keyPressEvent(self, event):
if not self.move_player.is_done:
self.move_player.add_key_pressed(event.key())
def keyReleaseEvent(self, event):
self.move_player.remove_key_pressed(event.key())
def game_update(self):
if len(self.enemies) == 0:
self.level_up()
return
for i in range(self.players_count):
if self.players[i].life == 0:
self.you_lost(i + 1)
return
keys_to_be_removed = []
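        # Collect finished bullets first; removing dict entries while iterating
        # over enemy_bullets.items() would raise a RuntimeError.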
for key, value in self.enemy_bullets.items():
enemy = self.enemies[key] if key < len(self.enemies) else None
if value.enemy_game_update(enemy):
keys_to_be_removed.append(key)
for item in keys_to_be_removed:
self.enemy_bullets.pop(item)
def enemy_bullet_game_update(self):
for i in range(self.players_count):
for bullet in self.enemy_bullets.values():
for shield in self.shields:
shield.check_if_shield_is_hit(bullet)
if shield.check_if_shield_is_destroyed():
self.shields.remove(shield)
for life in self.lives[i]:
                    if self.players[i].check_if_player_is_hit(bullet):
                        life.close()
                        self.lives[i].remove(life)
                        # The original guard `not self.lives[i] == 0` compared a list
                        # to 0 and was always true, so the player was never closed;
                        # check for an empty lives list instead.
                        if not self.lives[i]:
                            self.players[i].close()
                        break
def bullet_game_update(self):
for i in range(self.players_count):
for bullet in self.player_bullets[i]:
if bullet.player_game_update():
self.player_bullets[i].remove(bullet)
continue
should_continue = False
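                # should_continue emulates a labelled continue: once this bullet
                # hits an enemy or a shield, skip the remaining collision checks.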
for enemy in self.enemies:
if enemy.check_if_enemy_is_hit(bullet):
bullet.close()
self.player_bullets[i].remove(bullet)
self.scores[i].print_results(enemy.type)
should_continue = True
self.enemies.remove(enemy)
if should_continue:
continue
for shield in self.shields:
if shield.check_if_shield_is_hit(bullet):
bullet.close()
self.player_bullets[i].remove(bullet)
should_continue = True
if shield.check_if_shield_is_destroyed():
self.shields.remove(shield)
if should_continue:
continue
def enemy_game_update(self):
if not self.enemies:
return
elif len(self.enemies) == 1:
if len(self.enemy_bullets) == 0:
self.enemy_bullets[0] = Bullet(50, 50, self, True)
elif self.level.level - len(self.enemy_bullets) > 0:
bullets_missing = self.level.level - len(self.enemy_bullets)
random_bullet_number_to_be_spawned = choice([*range(0, bullets_missing, 1)]) if bullets_missing > 1 else 1
for i in range(random_bullet_number_to_be_spawned):
num = -1
if len(self.enemy_bullets) == 0:
num = choice([*range(0, len(self.enemies), 1)])
self.enemy_bullets[num] = Bullet(50, 50, self, True)
elif len(self.enemies) == 1:
self.enemy_bullets[0] = Bullet(50, 50, self, True)
else:
tries = 5
while (num in self.enemy_bullets.keys() or num == -1) and tries > 0:
num = choice([*range(0, len(self.enemies), 1)])
tries -= 1
if tries > 0:
self.enemy_bullets[num] = Bullet(50, 50, self, True)
for enemy in self.enemies:
if not self.shields:
if enemy.game_update(self.players[0].y() - self.players[0].height()):
self.you_lost()
else:
if enemy.game_update(self.players[0].y() - self.players[0].height(),
self.shields[0].y() - self.shields[0].height()):
self.you_lost()
def player_move_update(self, key):
for i in range(self.players_count):
bullet = self.players[i].game_update(key, len(self.player_bullets[i]), self.level.level)
if bullet:
self.player_bullets[i].append(bullet)
def init_deus(self, x):
if self.level.level not in self.deus_machine.keys():
if self.random_number == -1:
self.random_number = choice([*range(0, 99, 1)])
else:
self.random_number -= 1
if self.random_number == 0:
self.deus_thread.should_generate = True
if x != -1:
self.deus_machine[self.level.level] = DeusExMachine(x, self)
delete_deus_keys = []
for key, deus in self.deus_machine.items():
if deus is not None:
for i in range(self.players_count):
if deus.is_hit(self.players[i]):
delete_deus_keys.append(key)
if self.players[i].life == 3:
self.scores[i].print_deluxe()
else:
self.lives[i].insert(0, Life(abs(len(self.lives[i]) - 2), self, i + 1))
self.players[i].life += 1
break
for key in delete_deus_keys:
self.deus_machine[key].close()
self.deus_machine[key] = None
def level_up(self):
self.clear_screen()
self.level.level_up()
self.move_enemy.increment_speed()
self.enemy_bullet_game_update_thread.increment_speed()
self.start_game()
def you_lost(self, player=None):
self.clear_screen()
self.pause_threads()
close = QMessageBox()
close.setWindowTitle("Game over")
if player is None:
message = "The enemies have won, you can try again!"
else:
message = "Player " + str(player) + " lost. The current score is:\n"
for i in range(self.players_count):
message += "Player {}: {}, ".format(i + 1, self.scores[i].score)
message += "\nDo you want to play a new game?"
close.setText(message)
close.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
close.setDefaultButton(QMessageBox.Yes)
close = close.exec()
if close == QMessageBox.No:
self.hard_quit = True
self.close_game()
self.close()
else:
self.reset_game()
self.unpause_threads()
self.start_game()
def pause_threads(self):
self.game_update_thread.game_pause = True
def unpause_threads(self):
self.game_update_thread.game_pause = False
def reset_game(self):
self.level.reset_level()
for i in range(self.players_count):
self.players[i].reset_lives()
self.scores[i].reset_score()
self.move_enemy.reset_speed()
self.enemy_bullet_game_update_thread.reset_speed()
def clear_screen(self):
for bullet in self.enemy_bullets.values():
bullet.close()
self.enemy_bullets.clear()
for i in range(self.players_count):
for bullet in self.player_bullets[i]:
bullet.close()
self.player_bullets[i].clear()
for life in self.lives[i]:
life.close()
self.lives[i].clear()
for enemy in self.enemies:
enemy.close()
self.enemies.clear()
for shield in self.shields:
shield.close()
self.shields.clear()
delete_deus_keys = []
for key, deus in self.deus_machine.items():
if deus is not None:
delete_deus_keys.append(key)
for key in delete_deus_keys:
self.deus_machine[key].close()
self.deus_machine[key] = None
def closeEvent(self, event):
if self.hard_quit:
self.close_game()
else:
close = QMessageBox()
close.setWindowTitle("Are you sure you want to quit?")
close.setText("Are you sure you want to quit")
close.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
close.setDefaultButton(QMessageBox.Yes)
close = close.exec()
if close == QMessageBox.Yes:
self.close_game()
else:
event.ignore()
def close_game(self):
self.move_enemy.die()
self.move_player.die()
self.bullet_game_update_thread.die()
self.enemy_bullet_game_update_thread.die()
self.game_update_thread.die()
self.deus_thread.die()
self.closeGame.emit()
| [
"PyQt5.QtCore.pyqtSignal",
"DeusExMachina.DeusExMachine.DeusExMachine",
"GameUpdate.GameUpdateThread",
"PyQt5.QtWidgets.QMessageBox",
"Life.Life.Life",
"Player.Player.Player",
"Enemy.Enemy.Enemy",
"PyQt5.QtWidgets.QWidget.__init__",
"GameUpdate.BulletUpdateThread",
"Shield.Shield.Shield",
"Score.Score.Score",
"Bullet.Bullets.Bullet",
"Player.move_player.MovePlayer",
"GameUpdate.EnemyBulletUpdateThread",
"Level.Level.Level",
"Enemy.move_enemy.MoveEnemy",
"DeusExMachina.DeusExMachine.DeusThread"
] | [((624, 636), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (634, 636), False, 'from PyQt5.QtCore import pyqtSignal\n'), ((698, 735), 'PyQt5.QtWidgets.QWidget.__init__', 'QWidget.__init__', (['self'], {'parent': 'parent'}), '(self, parent=parent)\n', (714, 735), False, 'from PyQt5.QtWidgets import QWidget, QMessageBox\n'), ((894, 912), 'GameUpdate.GameUpdateThread', 'GameUpdateThread', ([], {}), '()\n', (910, 912), False, 'from GameUpdate import GameUpdateThread, BulletUpdateThread, EnemyBulletUpdateThread\n'), ((1032, 1052), 'GameUpdate.BulletUpdateThread', 'BulletUpdateThread', ([], {}), '()\n', (1050, 1052), False, 'from GameUpdate import GameUpdateThread, BulletUpdateThread, EnemyBulletUpdateThread\n'), ((1194, 1219), 'GameUpdate.EnemyBulletUpdateThread', 'EnemyBulletUpdateThread', ([], {}), '()\n', (1217, 1219), False, 'from GameUpdate import GameUpdateThread, BulletUpdateThread, EnemyBulletUpdateThread\n'), ((1359, 1371), 'DeusExMachina.DeusExMachine.DeusThread', 'DeusThread', ([], {}), '()\n', (1369, 1371), False, 'from DeusExMachina.DeusExMachine import DeusExMachine, DeusThread\n'), ((1460, 1471), 'Enemy.move_enemy.MoveEnemy', 'MoveEnemy', ([], {}), '()\n', (1469, 1471), False, 'from Enemy.move_enemy import MoveEnemy\n'), ((1568, 1580), 'Player.move_player.MovePlayer', 'MovePlayer', ([], {}), '()\n', (1578, 1580), False, 'from Player.move_player import MovePlayer\n'), ((2152, 2163), 'Level.Level.Level', 'Level', (['self'], {}), '(self)\n', (2157, 2163), False, 'from Level.Level import Level\n'), ((9319, 9332), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (9330, 9332), False, 'from PyQt5.QtWidgets import QWidget, QMessageBox\n'), ((11608, 11621), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (11619, 11621), False, 'from PyQt5.QtWidgets import QWidget, QMessageBox\n'), ((2241, 2280), 'Player.Player.Player', 'Player', (['self', '(i + 1)', 'self.players_count'], {}), '(self, i + 1, self.players_count)\n', (2247, 2280), False, 'from Player.Player import Player\n'), ((2313, 2331), 'Score.Score.Score', 'Score', (['self', '(i + 1)'], {}), '(self, i + 1)\n', (2318, 2331), False, 'from Score.Score import Score\n'), ((2807, 2822), 'Shield.Shield.Shield', 'Shield', (['i', 'self'], {}), '(i, self)\n', (2813, 2822), False, 'from Shield.Shield import Shield\n'), ((8250, 8272), 'DeusExMachina.DeusExMachine.DeusExMachine', 'DeusExMachine', (['x', 'self'], {}), '(x, self)\n', (8263, 8272), False, 'from DeusExMachina.DeusExMachine import DeusExMachine, DeusThread\n'), ((2919, 2936), 'Enemy.Enemy.Enemy', 'Enemy', (['i', 'j', 'self'], {}), '(i, j, self)\n', (2924, 2936), False, 'from Enemy.Enemy import Enemy\n'), ((3050, 3070), 'Life.Life.Life', 'Life', (['j', 'self', '(i + 1)'], {}), '(j, self, i + 1)\n', (3054, 3070), False, 'from Life.Life import Life\n'), ((6111, 6137), 'Bullet.Bullets.Bullet', 'Bullet', (['(50)', '(50)', 'self', '(True)'], {}), '(50, 50, self, True)\n', (6117, 6137), False, 'from Bullet.Bullets import Bullet\n'), ((6643, 6669), 'Bullet.Bullets.Bullet', 'Bullet', (['(50)', '(50)', 'self', '(True)'], {}), '(50, 50, self, True)\n', (6649, 6669), False, 'from Bullet.Bullets import Bullet\n'), ((6759, 6785), 'Bullet.Bullets.Bullet', 'Bullet', (['(50)', '(50)', 'self', '(True)'], {}), '(50, 50, self, True)\n', (6765, 6785), False, 'from Bullet.Bullets import Bullet\n'), ((7118, 7144), 'Bullet.Bullets.Bullet', 'Bullet', (['(50)', '(50)', 'self', '(True)'], {}), '(50, 50, self, True)\n', (7124, 7144), False, 'from Bullet.Bullets import Bullet\n')]
from PIL import Image
im=Image.open("3.gif")
im.save("pic{:02}.png".format(im.tell()))
| [
"PIL.Image.open"
] | [((25, 44), 'PIL.Image.open', 'Image.open', (['"""3.gif"""'], {}), "('3.gif')\n", (35, 44), False, 'from PIL import Image\n')] |
"""This module contains the class definition for Comments."""
from sqlalchemy import Column, Integer, Text, ForeignKey
from server.models.Base import BaseModel
class CommentModel(BaseModel):
"""This class represent the db model for a Comment."""
__tablename__ = 'comments'
id = Column(Integer, primary_key=True)
body = Column(Text(500))
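    # The length argument to Text is advisory: some backends (e.g. MySQL) use
    # it to pick a column type, while others ignore it entirely.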
author_id = Column(Integer, ForeignKey('users.id'))
devotional_id = Column(Integer, ForeignKey('devotionals.id'))
| [
"sqlalchemy.Text",
"sqlalchemy.ForeignKey",
"sqlalchemy.Column"
] | [((292, 325), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (298, 325), False, 'from sqlalchemy import Column, Integer, Text, ForeignKey\n'), ((344, 353), 'sqlalchemy.Text', 'Text', (['(500)'], {}), '(500)\n', (348, 353), False, 'from sqlalchemy import Column, Integer, Text, ForeignKey\n'), ((387, 409), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (397, 409), False, 'from sqlalchemy import Column, Integer, Text, ForeignKey\n'), ((447, 475), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""devotionals.id"""'], {}), "('devotionals.id')\n", (457, 475), False, 'from sqlalchemy import Column, Integer, Text, ForeignKey\n')] |
import os
import re
import urllib
import urlparse
from django.http import HttpResponseRedirect
from facetools.url import translate_url_to_facebook_url, facebook_redirect
GET_REDIRECT_PARAM = 'facebook_redirect'
class FandjangoIntegrationMiddleware(object):
def process_response(self, request, response):
if self.should_process_response(response):
# Get the oauth url fandjango is redirecting the user to
oauth_url = self.get_oauth_url(response.content, "window.parent.location =", ';')
# Update the oauth url so that it's url it goes to after
# the user authorizes the app is a translated facebook url
redirect_uri = urlparse.parse_qs(urlparse.urlparse(oauth_url).query)['redirect_uri'][0]
path = urlparse.urlparse(redirect_uri).path
path = '/' + "/".join(path.split("/")[2:])
if not path.endswith("/"): path = path + "/"
new_url = translate_url_to_facebook_url(path)
# Replace the old url with the new one
start_token = "redirect_uri="
start = oauth_url.index(start_token) + len(start_token)
end = oauth_url.index("&", start)
new_oauth_url = oauth_url.replace(oauth_url[start:end], urllib.quote_plus(new_url))
response.content = response.content.replace(oauth_url, new_oauth_url)
response.status_code = 200
return response
def should_process_response(self, response):
# We're going to compare this response's content to Fandjango's template
# for authorization and see if they are the same. First we need to find
# the dynamic content in the template and in this response's html
try:
oauth_url_to_remove = self.get_oauth_url(response.content, 'window.parent.location = ', ';')
except:
return False
if response.content.count(oauth_url_to_remove) != 2:
return False
import fandjango
template_path = os.path.join(os.path.dirname(fandjango.__file__), "templates/fandjango/authorize_application.html")
        with open(template_path) as fandjango_template_file:
            fandjango_template_content = fandjango_template_file.read()
template_tags_to_remove = re.findall("\{\{.*url\|safe \}\}", fandjango_template_content)
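        # Fandjango's stock template embeds the oauth url exactly twice via
        # {{ url|safe }}, which is why exactly two occurrences are expected.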
if len(template_tags_to_remove) != 2:
return False
# Strip out the dynamic content
response_template_content = response.content.replace(oauth_url_to_remove, "")
for template_tag_to_remove in template_tags_to_remove:
fandjango_template_content = fandjango_template_content.replace(template_tag_to_remove, "")
# If the response minus its dynamic content is identical to Fandjango's
# template minus its dynamic content then we should process it
return response_template_content == fandjango_template_content
def get_oauth_url(self, content, start_token, end_token):
start = content.index(start_token) + len(start_token)
end = content.index(end_token, start)
return content[start:end].strip()[1:-1] # remove any whitespace and quotes around the url | [
"urllib.quote_plus",
"facetools.url.translate_url_to_facebook_url",
"os.path.dirname",
"re.findall",
"urlparse.urlparse"
] | [((2288, 2355), 're.findall', 're.findall', (['"""\\\\{\\\\{.*url\\\\|safe \\\\}\\\\}"""', 'fandjango_template_content'], {}), "('\\\\{\\\\{.*url\\\\|safe \\\\}\\\\}', fandjango_template_content)\n", (2298, 2355), False, 'import re\n'), ((957, 992), 'facetools.url.translate_url_to_facebook_url', 'translate_url_to_facebook_url', (['path'], {}), '(path)\n', (986, 992), False, 'from facetools.url import translate_url_to_facebook_url, facebook_redirect\n'), ((2036, 2071), 'os.path.dirname', 'os.path.dirname', (['fandjango.__file__'], {}), '(fandjango.__file__)\n', (2051, 2071), False, 'import os\n'), ((786, 817), 'urlparse.urlparse', 'urlparse.urlparse', (['redirect_uri'], {}), '(redirect_uri)\n', (803, 817), False, 'import urlparse\n'), ((1269, 1295), 'urllib.quote_plus', 'urllib.quote_plus', (['new_url'], {}), '(new_url)\n', (1286, 1295), False, 'import urllib\n'), ((712, 740), 'urlparse.urlparse', 'urlparse.urlparse', (['oauth_url'], {}), '(oauth_url)\n', (729, 740), False, 'import urlparse\n')] |
import sys
import secrets
from htmltreediff import diff
def main(argv=None):
if not argv:
argv = sys.argv # pragma: no cover
with open(argv[1]) as file_a:
html_a = file_a.read()
with open(argv[2]) as file_b:
html_b = file_b.read()
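    # Note: this assumes the tmp/doc_diff/ output directory already exists.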
output_filename = f"tmp/doc_diff/output_{secrets.token_urlsafe(6)}.html"
f = open(output_filename, "w")
f.write(diff(html_a, html_b, cutoff=0.0, pretty=True))
f.close()
print(output_filename)
if __name__ == '__main__':
main() # pragma: no cover
| [
"secrets.token_urlsafe",
"htmltreediff.diff"
] | [((392, 437), 'htmltreediff.diff', 'diff', (['html_a', 'html_b'], {'cutoff': '(0.0)', 'pretty': '(True)'}), '(html_a, html_b, cutoff=0.0, pretty=True)\n', (396, 437), False, 'from htmltreediff import diff\n'), ((313, 337), 'secrets.token_urlsafe', 'secrets.token_urlsafe', (['(6)'], {}), '(6)\n', (334, 337), False, 'import secrets\n')] |
from Jumpscale import j
import nacl
def chat(bot):
"""
    This chatflow deploys a 3bot container on the grid.
"""
explorer = j.clients.explorer.default
cl = j.clients.s3.get("deployer")
AWS_ID = cl.accesskey_
AWS_SECRET = cl.secretkey_
user_info = bot.user_info()
name = user_info["username"]
email = user_info["email"]
choose = ["Deploy a new 3bot", "Restore my 3bot"]
ip_range_choose = ["Specify IP Range", "Choose IP Range for me"]
expiration = j.data.time.epoch + (60 * 60 * 24) # for one day
backup_directory = name.replace(".", "_")
env = dict()
secret_env = dict()
identity = j.sal.reservation_chatflow.validate_user(user_info)
user_choice = bot.single_choice("This wizard will help you deploy or restore your 3bot.", choose)
identity_pubkey = identity.pubkey
if user_choice == "Restore my 3bot":
password = bot.secret_ask("Please enter the password you configured to backup your 3bot")
hash_restore = nacl.hash.blake2b(password.encode(), key=identity_pubkey.encode()).decode()
network = j.sal.reservation_chatflow.network_select(bot, identity.id)
if not network:
return
currency = network.currency
farms = j.sal.reservation_chatflow.farms_select(bot)
# ask user about corex user:password and ssh-key to give him full access to his container
pub_key = None
while not pub_key:
pub_key = bot.upload_file(
""""Please add your public ssh key, this will allow you to access the deployed container using ssh.
Just upload the file with the key"""
).split("\n")[0]
form = bot.new_form()
user_corex = form.string_ask(
"Please create a username for your 3bot (this will allow you secure access to the 3bot from your web browser)"
)
password = form.secret_ask("Please create a password for your 3bot")
form.ask()
# create new reservation
reservation = j.sal.zosv2.reservation_create()
node_selected = j.sal.reservation_chatflow.nodes_get(1, farm_names=farms, cru=4, sru=8, currency=currency)[0]
if not node_selected:
res = "# We are sorry we don't have empty Node to deploy your 3bot"
res = j.tools.jinja2.template_render(text=res, **locals())
bot.md_show(res)
return
network.add_node(node_selected)
ip_address = network.ask_ip_from_node(node_selected, "Please choose IP Address for your solution")
# Encrypt AWS ID and AWS Secret to send it in secret env
aws_id_encrypted = j.sal.zosv2.container.encrypt_secret(node_selected.node_id, AWS_ID)
aws_secret_encrypted = j.sal.zosv2.container.encrypt_secret(node_selected.node_id, AWS_SECRET)
user_corex_encrypted = j.sal.zosv2.container.encrypt_secret(node_selected.node_id, user_corex.value)
password_corex_encrypted = j.sal.zosv2.container.encrypt_secret(node_selected.node_id, password.value)
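    # encrypt_secret seals each value against the selected node's public key,
    # so only that zero-os node can decrypt the credentials in secret_env.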
# Create network of reservation and add peers
if user_choice == "Restore my 3bot":
hash_encrypt = j.sal.zosv2.container.encrypt_secret(node_selected.node_id, hash_restore)
env.update({"restore": "True"})
secret_env.update({"HASH": hash_encrypt})
backup = bot.single_choice("Do you want your 3bot to be automatically backed up?", ["Yes", "No"])
if backup == "Yes":
password = bot.secret_ask(
"""The password you add here will be used to encrypt your backup to keep your 3bot safe.
please make sure to keep this password safe so you can later restore your 3bot.
Remember, this password will not be saved anywhere, so there cannot be recovery for it"""
)
hash_backup = nacl.hash.blake2b(password.encode(), key=identity_pubkey.encode()).decode()
hash_encrypted = j.sal.zosv2.container.encrypt_secret(node_selected.node_id, hash_backup)
secret_env.update({"HASH": hash_encrypted})
env.update({"backup": "True", "FOLDER": backup_directory})
env.update({"pub_key": pub_key, "botname": name, "botemail": email})
secret_env.update(
{
"AWS_ID": aws_id_encrypted,
"AWS_SECRET": aws_secret_encrypted,
"corex_password": password_corex_encrypted,
"corex_user": user_corex_encrypted,
}
)
container_flist = "https://hub.grid.tf/bola_nasr_1/threefoldtech-3bot2-corex.flist"
entry_point = "/usr/bin/zinit init -d"
storage_url = "zdb://hub.grid.tf:9900"
network.update(identity.id, bot=bot)
# Add volume and create container schema
vol = j.sal.zosv2.volume.create(reservation, node_selected.node_id, size=8)
reservation_create = j.sal.reservation_chatflow.reservation_register(
reservation, expiration, customer_tid=identity.id, currency=currency, bot=bot
)
rid = reservation_create.reservation_id
payment = j.sal.reservation_chatflow.payments_show(bot, reservation_create, currency)
if payment["free"]:
pass
elif payment["wallet"]:
j.sal.zosv2.billing.payout_farmers(payment["wallet"], reservation_create)
j.sal.reservation_chatflow.payment_wait(bot, rid, threebot_app=False)
else:
j.sal.reservation_chatflow.payment_wait(bot, rid, threebot_app=True, reservation_create_resp=reservation_create)
j.sal.reservation_chatflow.reservation_wait(bot, rid)
# create container
cont = j.sal.zosv2.container.create(
reservation=reservation,
node_id=node_selected.node_id,
network_name=network.name,
ip_address=ip_address,
flist=container_flist,
storage_url=storage_url,
env=env,
entrypoint=entry_point,
cpu=4,
memory=4096,
public_ipv6=True,
secret_env=secret_env,
)
volume_id = f"{rid}-{vol.workload_id}"
j.sal.zosv2.volume.attach_existing(cont, volume_id, "/sandbox/var")
reservation_create = j.sal.reservation_chatflow.reservation_register(
reservation, expiration, customer_tid=identity.id, currency=currency, bot=bot
)
resv_id = reservation_create.reservation_id
payment = j.sal.reservation_chatflow.payments_show(bot, reservation_create, currency)
if payment["free"]:
pass
elif payment["wallet"]:
j.sal.zosv2.billing.payout_farmers(payment["wallet"], reservation_create)
j.sal.reservation_chatflow.payment_wait(bot, resv_id, threebot_app=False)
else:
j.sal.reservation_chatflow.payment_wait(
bot, resv_id, threebot_app=True, reservation_create_resp=reservation_create
)
j.sal.reservation_chatflow.reservation_wait(bot, resv_id)
res = f"""
    # Reservation sent. ID: {resv_id}
    # Your 3bot container is ready. Please continue initialization at ```{ip_address}:8000```. It may take a few minutes.
"""
bot.md_show(j.core.text.strip(res))
| [
"Jumpscale.j.sal.zosv2.billing.payout_farmers",
"Jumpscale.j.sal.reservation_chatflow.payments_show",
"Jumpscale.j.sal.reservation_chatflow.reservation_wait",
"Jumpscale.j.sal.reservation_chatflow.payment_wait",
"Jumpscale.j.sal.reservation_chatflow.farms_select",
"Jumpscale.j.sal.zosv2.container.encrypt_secret",
"Jumpscale.j.sal.zosv2.volume.create",
"Jumpscale.j.sal.reservation_chatflow.nodes_get",
"Jumpscale.j.core.text.strip",
"Jumpscale.j.clients.s3.get",
"Jumpscale.j.sal.reservation_chatflow.validate_user",
"Jumpscale.j.sal.reservation_chatflow.network_select",
"Jumpscale.j.sal.zosv2.container.create",
"Jumpscale.j.sal.zosv2.reservation_create",
"Jumpscale.j.sal.zosv2.volume.attach_existing",
"Jumpscale.j.sal.reservation_chatflow.reservation_register"
] | [((174, 202), 'Jumpscale.j.clients.s3.get', 'j.clients.s3.get', (['"""deployer"""'], {}), "('deployer')\n", (190, 202), False, 'from Jumpscale import j\n'), ((653, 704), 'Jumpscale.j.sal.reservation_chatflow.validate_user', 'j.sal.reservation_chatflow.validate_user', (['user_info'], {}), '(user_info)\n', (693, 704), False, 'from Jumpscale import j\n'), ((1099, 1158), 'Jumpscale.j.sal.reservation_chatflow.network_select', 'j.sal.reservation_chatflow.network_select', (['bot', 'identity.id'], {}), '(bot, identity.id)\n', (1140, 1158), False, 'from Jumpscale import j\n'), ((1238, 1282), 'Jumpscale.j.sal.reservation_chatflow.farms_select', 'j.sal.reservation_chatflow.farms_select', (['bot'], {}), '(bot)\n', (1277, 1282), False, 'from Jumpscale import j\n'), ((1967, 1999), 'Jumpscale.j.sal.zosv2.reservation_create', 'j.sal.zosv2.reservation_create', ([], {}), '()\n', (1997, 1999), False, 'from Jumpscale import j\n'), ((2548, 2615), 'Jumpscale.j.sal.zosv2.container.encrypt_secret', 'j.sal.zosv2.container.encrypt_secret', (['node_selected.node_id', 'AWS_ID'], {}), '(node_selected.node_id, AWS_ID)\n', (2584, 2615), False, 'from Jumpscale import j\n'), ((2643, 2714), 'Jumpscale.j.sal.zosv2.container.encrypt_secret', 'j.sal.zosv2.container.encrypt_secret', (['node_selected.node_id', 'AWS_SECRET'], {}), '(node_selected.node_id, AWS_SECRET)\n', (2679, 2714), False, 'from Jumpscale import j\n'), ((2742, 2819), 'Jumpscale.j.sal.zosv2.container.encrypt_secret', 'j.sal.zosv2.container.encrypt_secret', (['node_selected.node_id', 'user_corex.value'], {}), '(node_selected.node_id, user_corex.value)\n', (2778, 2819), False, 'from Jumpscale import j\n'), ((2851, 2926), 'Jumpscale.j.sal.zosv2.container.encrypt_secret', 'j.sal.zosv2.container.encrypt_secret', (['node_selected.node_id', 'password.value'], {}), '(node_selected.node_id, password.value)\n', (2887, 2926), False, 'from Jumpscale import j\n'), ((4575, 4644), 'Jumpscale.j.sal.zosv2.volume.create', 'j.sal.zosv2.volume.create', (['reservation', 'node_selected.node_id'], {'size': '(8)'}), '(reservation, node_selected.node_id, size=8)\n', (4600, 4644), False, 'from Jumpscale import j\n'), ((4670, 4800), 'Jumpscale.j.sal.reservation_chatflow.reservation_register', 'j.sal.reservation_chatflow.reservation_register', (['reservation', 'expiration'], {'customer_tid': 'identity.id', 'currency': 'currency', 'bot': 'bot'}), '(reservation, expiration,\n customer_tid=identity.id, currency=currency, bot=bot)\n', (4717, 4800), False, 'from Jumpscale import j\n'), ((4869, 4944), 'Jumpscale.j.sal.reservation_chatflow.payments_show', 'j.sal.reservation_chatflow.payments_show', (['bot', 'reservation_create', 'currency'], {}), '(bot, reservation_create, currency)\n', (4909, 4944), False, 'from Jumpscale import j\n'), ((5306, 5359), 'Jumpscale.j.sal.reservation_chatflow.reservation_wait', 'j.sal.reservation_chatflow.reservation_wait', (['bot', 'rid'], {}), '(bot, rid)\n', (5349, 5359), False, 'from Jumpscale import j\n'), ((5395, 5686), 'Jumpscale.j.sal.zosv2.container.create', 'j.sal.zosv2.container.create', ([], {'reservation': 'reservation', 'node_id': 'node_selected.node_id', 'network_name': 'network.name', 'ip_address': 'ip_address', 'flist': 'container_flist', 'storage_url': 'storage_url', 'env': 'env', 'entrypoint': 'entry_point', 'cpu': '(4)', 'memory': '(4096)', 'public_ipv6': '(True)', 'secret_env': 'secret_env'}), '(reservation=reservation, node_id=node_selected\n .node_id, network_name=network.name, ip_address=ip_address, flist=\n container_flist, storage_url=storage_url, env=env, entrypoint=\n entry_point, cpu=4, memory=4096, public_ipv6=True, secret_env=secret_env)\n', (5423, 5686), False, 'from Jumpscale import j\n'), ((5823, 5890), 'Jumpscale.j.sal.zosv2.volume.attach_existing', 'j.sal.zosv2.volume.attach_existing', (['cont', 'volume_id', '"""/sandbox/var"""'], {}), "(cont, volume_id, '/sandbox/var')\n", (5857, 5890), False, 'from Jumpscale import j\n'), ((5917, 6047), 'Jumpscale.j.sal.reservation_chatflow.reservation_register', 'j.sal.reservation_chatflow.reservation_register', (['reservation', 'expiration'], {'customer_tid': 'identity.id', 'currency': 'currency', 'bot': 'bot'}), '(reservation, expiration,\n customer_tid=identity.id, currency=currency, bot=bot)\n', (5964, 6047), False, 'from Jumpscale import j\n'), ((6120, 6195), 'Jumpscale.j.sal.reservation_chatflow.payments_show', 'j.sal.reservation_chatflow.payments_show', (['bot', 'reservation_create', 'currency'], {}), '(bot, reservation_create, currency)\n', (6160, 6195), False, 'from Jumpscale import j\n'), ((6587, 6644), 'Jumpscale.j.sal.reservation_chatflow.reservation_wait', 'j.sal.reservation_chatflow.reservation_wait', (['bot', 'resv_id'], {}), '(bot, resv_id)\n', (6630, 6644), False, 'from Jumpscale import j\n'), ((2020, 2114), 'Jumpscale.j.sal.reservation_chatflow.nodes_get', 'j.sal.reservation_chatflow.nodes_get', (['(1)'], {'farm_names': 'farms', 'cru': '(4)', 'sru': '(8)', 'currency': 'currency'}), '(1, farm_names=farms, cru=4, sru=8,\n currency=currency)\n', (2056, 2114), False, 'from Jumpscale import j\n'), ((3042, 3115), 'Jumpscale.j.sal.zosv2.container.encrypt_secret', 'j.sal.zosv2.container.encrypt_secret', (['node_selected.node_id', 'hash_restore'], {}), '(node_selected.node_id, hash_restore)\n', (3078, 3115), False, 'from Jumpscale import j\n'), ((3796, 3868), 'Jumpscale.j.sal.zosv2.container.encrypt_secret', 'j.sal.zosv2.container.encrypt_secret', (['node_selected.node_id', 'hash_backup'], {}), '(node_selected.node_id, hash_backup)\n', (3832, 3868), False, 'from Jumpscale import j\n'), ((6855, 6877), 'Jumpscale.j.core.text.strip', 'j.core.text.strip', (['res'], {}), '(res)\n', (6872, 6877), False, 'from Jumpscale import j\n'), ((5018, 5091), 'Jumpscale.j.sal.zosv2.billing.payout_farmers', 'j.sal.zosv2.billing.payout_farmers', (["payment['wallet']", 'reservation_create'], {}), "(payment['wallet'], reservation_create)\n", (5052, 5091), False, 'from Jumpscale import j\n'), ((5100, 5169), 'Jumpscale.j.sal.reservation_chatflow.payment_wait', 'j.sal.reservation_chatflow.payment_wait', (['bot', 'rid'], {'threebot_app': '(False)'}), '(bot, rid, threebot_app=False)\n', (5139, 5169), False, 'from Jumpscale import j\n'), ((5188, 5304), 'Jumpscale.j.sal.reservation_chatflow.payment_wait', 'j.sal.reservation_chatflow.payment_wait', (['bot', 'rid'], {'threebot_app': '(True)', 'reservation_create_resp': 'reservation_create'}), '(bot, rid, threebot_app=True,\n reservation_create_resp=reservation_create)\n', (5227, 5304), False, 'from Jumpscale import j\n'), ((6269, 6342), 'Jumpscale.j.sal.zosv2.billing.payout_farmers', 'j.sal.zosv2.billing.payout_farmers', (["payment['wallet']", 'reservation_create'], {}), "(payment['wallet'], reservation_create)\n", (6303, 6342), False, 'from Jumpscale import j\n'), ((6351, 6424), 'Jumpscale.j.sal.reservation_chatflow.payment_wait', 'j.sal.reservation_chatflow.payment_wait', (['bot', 'resv_id'], {'threebot_app': '(False)'}), '(bot, resv_id, threebot_app=False)\n', (6390, 6424), False, 'from Jumpscale import j\n'), ((6443, 6563), 'Jumpscale.j.sal.reservation_chatflow.payment_wait', 'j.sal.reservation_chatflow.payment_wait', (['bot', 'resv_id'], {'threebot_app': '(True)', 'reservation_create_resp': 'reservation_create'}), '(bot, resv_id, threebot_app=True,\n reservation_create_resp=reservation_create)\n', (6482, 6563), False, 'from Jumpscale import j\n')]
import iso8601
import unittest
from copy import deepcopy
try:
import cPickle as pickle
except ImportError:
import pickle
class TestISO8601(unittest.TestCase):
def test_iso8601_regex(self):
assert iso8601.ISO8601_REGEX.match("2006-10-11T00:14:33Z")
def test_timezone_regex(self):
assert iso8601.TIMEZONE_REGEX.match("+01:00")
assert iso8601.TIMEZONE_REGEX.match("+00:00")
assert iso8601.TIMEZONE_REGEX.match("+01:20")
assert iso8601.TIMEZONE_REGEX.match("-01:00")
def test_parse_date(self):
d = iso8601.parse_date("2006-10-20T15:34:56Z")
assert d.year == 2006
assert d.month == 10
assert d.day == 20
assert d.hour == 15
assert d.minute == 34
assert d.second == 56
assert d.tzinfo == iso8601.UTC
def test_parse_only_date(self):
d = iso8601.parse_date("2006-10-20")
assert d.year == 2006
assert d.month == 10
assert d.day == 20
assert d.hour == 0
assert d.minute == 0
assert d.second == 0
assert d.tzinfo == iso8601.UTC
def test_parse_date_fraction(self):
d = iso8601.parse_date("2006-10-20T15:34:56.123Z")
assert d.year == 2006
assert d.month == 10
assert d.day == 20
assert d.hour == 15
assert d.minute == 34
assert d.second == 56
assert d.microsecond == 123000
assert d.tzinfo == iso8601.UTC
def test_parse_date_fraction_2(self):
"""From bug 6
"""
d = iso8601.parse_date("2007-5-7T11:43:55.328Z")
assert d.year == 2007
assert d.month == 5
assert d.day == 7
assert d.hour == 11
assert d.minute == 43
assert d.second == 55
assert d.microsecond == 328000
assert d.tzinfo == iso8601.UTC
def test_parse_date_tz(self):
d = iso8601.parse_date("2006-10-20T15:34:56.123+02:30")
assert d.year == 2006
assert d.month == 10
assert d.day == 20
assert d.hour == 15
assert d.minute == 34
assert d.second == 56
assert d.microsecond == 123000
assert d.tzinfo.tzname(None) == "+02:30"
offset = d.tzinfo.utcoffset(None)
assert offset.days == 0
assert offset.seconds == 60 * 60 * 2.5
def test_parse_date_negtz(self):
d = iso8601.parse_date("2006-10-20T15:34:56.123-02:30")
assert d.year == 2006
assert d.month == 10
assert d.day == 20
assert d.hour == 15
assert d.minute == 34
assert d.second == 56
assert d.microsecond == 123000
assert d.tzinfo.tzname(None) == "-02:30"
offset = d.tzinfo.utcoffset(None)
assert offset.days == -1
assert offset.seconds == 86400 - 60 * 60 * 2.5
def test_parse_date_2d_tz(self):
d = iso8601.parse_date("2010-07-01 00:01:20+07")
assert d.year == 2010
assert d.month == 7
assert d.day == 1
assert d.hour == 0
assert d.minute == 1
assert d.second == 20
assert d.tzinfo.tzname(None) == "+07"
offset = d.tzinfo.utcoffset(None)
assert offset.days == 0
assert offset.seconds == 60 * 60 * 7
def test_parse_date_2d_negtz(self):
d = iso8601.parse_date("2010-07-01 00:01:20-07")
assert d.year == 2010
assert d.month == 7
assert d.day == 1
assert d.hour == 0
assert d.minute == 1
assert d.second == 20
assert d.tzinfo.tzname(None) == "-07"
offset = d.tzinfo.utcoffset(None)
assert offset.days == -1
assert offset.seconds == 86400 - 60 * 60 * 7
def test_parse_date_2d_ms_tz(self):
d = iso8601.parse_date("2011-07-27 21:05:12.843248+07")
assert d.year == 2011
assert d.month == 7
assert d.day == 27
assert d.hour == 21
assert d.minute == 5
assert d.second == 12
assert d.microsecond == 843248
assert d.tzinfo.tzname(None) == "+07"
offset = d.tzinfo.utcoffset(None)
assert offset.days == 0
assert offset.seconds == 60 * 60 * 7
def test_parse_date_2d_ms_negtz(self):
d = iso8601.parse_date("2011-07-27 21:05:12.843248-07")
assert d.year == 2011
assert d.month == 7
assert d.day == 27
assert d.hour == 21
assert d.minute == 5
assert d.second == 12
assert d.microsecond == 843248
assert d.tzinfo.tzname(None) == "-07"
offset = d.tzinfo.utcoffset(None)
assert offset.days == -1
assert offset.seconds == 86400 - 60 * 60 * 7
def test_parse_invalid_date(self):
self.assertRaises(iso8601.ParseError, iso8601.parse_date, None)
def test_parse_invalid_date2(self):
self.assertRaises(iso8601.ParseError, iso8601.parse_date, "23")
def test_parse_invalid_date3(self):
self.assertRaises(iso8601.ParseError, iso8601.parse_date, "1355054205")
def test_parse_no_timezone(self):
"""issue 4 - Handle datetime string without timezone
This tests what happens when you parse a date with no timezone. While not
strictly correct this is quite common. I'll assume UTC for the time zone
in this case.
"""
d = iso8601.parse_date("2007-01-01T08:00:00")
assert d.year == 2007
assert d.month == 1
assert d.day == 1
assert d.hour == 8
assert d.minute == 0
assert d.second == 0
assert d.microsecond == 0
assert d.tzinfo == iso8601.UTC
def test_parse_no_timezone_different_default(self):
tz = iso8601.FixedOffset(2, 0, "test offset")
d = iso8601.parse_date("2007-01-01T08:00:00", default_timezone=tz)
assert d.tzinfo == tz
def test_space_separator(self):
"""Handle a separator other than T
"""
d = iso8601.parse_date("2007-06-23 06:40:34.00Z")
assert d.year == 2007
assert d.month == 6
assert d.day == 23
assert d.hour == 6
assert d.minute == 40
assert d.second == 34
assert d.microsecond == 0
assert d.tzinfo == iso8601.UTC
def test_deepcopy(self):
"""
issue 20 - dates returned by parse_date do not support deepcopy
FixedOffset can not be deep copied (raises a TypeError).
"""
d = iso8601.parse_date('2012-06-13 11:06:47+02:00')
d_copy = deepcopy(d)
assert d is not d_copy
assert d == d_copy
def test_pickle_utc(self):
"""Tests (UTC) dates returned by parse_date can be pickled"""
d = iso8601.parse_date('2012-09-19T01:54:30')
d_pickled = pickle.dumps(d)
d_copy = pickle.loads(d_pickled)
assert d == d_copy
def test_binary_pickle_utc(self):
"""Tests (UTC) dates returned by parse_date can be (binary) pickled"""
d = iso8601.parse_date('2012-09-19T01:54:30')
d_pickled = pickle.dumps(d, pickle.HIGHEST_PROTOCOL)
d_copy = pickle.loads(d_pickled)
assert d == d_copy
def test_pickle_fixed(self):
"""Tests (FixedOffset) dates returned by parse_date can be pickled"""
d = iso8601.parse_date('2012-09-19T11:59:05+10:00')
d_pickled = pickle.dumps(d)
d_copy = pickle.loads(d_pickled)
assert d == d_copy
def test_binary_pickle_fixed(self):
"""Tests (FixedOffset) dates returned by parse_date can be (binary) pickled"""
d = iso8601.parse_date('2012-09-19T11:59:05+10:00')
d_pickled = pickle.dumps(d, pickle.HIGHEST_PROTOCOL)
d_copy = pickle.loads(d_pickled)
assert d == d_copy
def test_date_no_day(self):
d = iso8601.parse_date('2012-12')
assert d.year == 2012
assert d.month == 12
assert d.day == 1
assert d.hour == 0
assert d.minute == 0
assert d.second == 0
assert d.microsecond == 0
assert d.tzinfo == iso8601.UTC
def test_date_no_month(self):
d = iso8601.parse_date('2012')
assert d.year == 2012
assert d.month == 1
assert d.day == 1
assert d.hour == 0
assert d.minute == 0
assert d.second == 0
assert d.microsecond == 0
assert d.tzinfo == iso8601.UTC
if __name__ == '__main__':
unittest.main()
| [
"iso8601.ISO8601_REGEX.match",
"pickle.dumps",
"iso8601.FixedOffset",
"pickle.loads",
"iso8601.parse_date",
"iso8601.TIMEZONE_REGEX.match",
"copy.deepcopy",
"unittest.main"
] | [((8954, 8969), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8967, 8969), False, 'import unittest\n'), ((217, 268), 'iso8601.ISO8601_REGEX.match', 'iso8601.ISO8601_REGEX.match', (['"""2006-10-11T00:14:33Z"""'], {}), "('2006-10-11T00:14:33Z')\n", (244, 268), False, 'import iso8601\n'), ((324, 362), 'iso8601.TIMEZONE_REGEX.match', 'iso8601.TIMEZONE_REGEX.match', (['"""+01:00"""'], {}), "('+01:00')\n", (352, 362), False, 'import iso8601\n'), ((378, 416), 'iso8601.TIMEZONE_REGEX.match', 'iso8601.TIMEZONE_REGEX.match', (['"""+00:00"""'], {}), "('+00:00')\n", (406, 416), False, 'import iso8601\n'), ((432, 470), 'iso8601.TIMEZONE_REGEX.match', 'iso8601.TIMEZONE_REGEX.match', (['"""+01:20"""'], {}), "('+01:20')\n", (460, 470), False, 'import iso8601\n'), ((486, 524), 'iso8601.TIMEZONE_REGEX.match', 'iso8601.TIMEZONE_REGEX.match', (['"""-01:00"""'], {}), "('-01:00')\n", (514, 524), False, 'import iso8601\n'), ((573, 615), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2006-10-20T15:34:56Z"""'], {}), "('2006-10-20T15:34:56Z')\n", (591, 615), False, 'import iso8601\n'), ((886, 918), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2006-10-20"""'], {}), "('2006-10-20')\n", (904, 918), False, 'import iso8601\n'), ((1186, 1232), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2006-10-20T15:34:56.123Z"""'], {}), "('2006-10-20T15:34:56.123Z')\n", (1204, 1232), False, 'import iso8601\n'), ((1587, 1631), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2007-5-7T11:43:55.328Z"""'], {}), "('2007-5-7T11:43:55.328Z')\n", (1605, 1631), False, 'import iso8601\n'), ((1933, 1984), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2006-10-20T15:34:56.123+02:30"""'], {}), "('2006-10-20T15:34:56.123+02:30')\n", (1951, 1984), False, 'import iso8601\n'), ((2909, 2960), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2006-10-20T15:34:56.123-02:30"""'], {}), "('2006-10-20T15:34:56.123-02:30')\n", (2927, 2960), False, 'import iso8601\n'), ((3403, 3447), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2010-07-01 00:01:20+07"""'], {}), "('2010-07-01 00:01:20+07')\n", (3421, 3447), False, 'import iso8601\n'), ((3844, 3888), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2010-07-01 00:01:20-07"""'], {}), "('2010-07-01 00:01:20-07')\n", (3862, 3888), False, 'import iso8601\n'), ((4286, 4337), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2011-07-27 21:05:12.843248+07"""'], {}), "('2011-07-27 21:05:12.843248+07')\n", (4304, 4337), False, 'import iso8601\n'), ((4770, 4821), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2011-07-27 21:05:12.843248-07"""'], {}), "('2011-07-27 21:05:12.843248-07')\n", (4788, 4821), False, 'import iso8601\n'), ((5883, 5924), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2007-01-01T08:00:00"""'], {}), "('2007-01-01T08:00:00')\n", (5901, 5924), False, 'import iso8601\n'), ((6241, 6281), 'iso8601.FixedOffset', 'iso8601.FixedOffset', (['(2)', '(0)', '"""test offset"""'], {}), "(2, 0, 'test offset')\n", (6260, 6281), False, 'import iso8601\n'), ((6294, 6356), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2007-01-01T08:00:00"""'], {'default_timezone': 'tz'}), "('2007-01-01T08:00:00', default_timezone=tz)\n", (6312, 6356), False, 'import iso8601\n'), ((6504, 6549), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2007-06-23 06:40:34.00Z"""'], {}), "('2007-06-23 06:40:34.00Z')\n", (6522, 6549), False, 'import iso8601\n'), ((6999, 7046), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2012-06-13 11:06:47+02:00"""'], {}), "('2012-06-13 11:06:47+02:00')\n", (7017, 7046), False, 'import iso8601\n'), ((7064, 7075), 'copy.deepcopy', 'deepcopy', (['d'], {}), '(d)\n', (7072, 7075), False, 'from copy import deepcopy\n'), ((7248, 7289), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2012-09-19T01:54:30"""'], {}), "('2012-09-19T01:54:30')\n", (7266, 7289), False, 'import iso8601\n'), ((7310, 7325), 'pickle.dumps', 'pickle.dumps', (['d'], {}), '(d)\n', (7322, 7325), False, 'import pickle\n'), ((7343, 7366), 'pickle.loads', 'pickle.loads', (['d_pickled'], {}), '(d_pickled)\n', (7355, 7366), False, 'import pickle\n'), ((7524, 7565), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2012-09-19T01:54:30"""'], {}), "('2012-09-19T01:54:30')\n", (7542, 7565), False, 'import iso8601\n'), ((7586, 7626), 'pickle.dumps', 'pickle.dumps', (['d', 'pickle.HIGHEST_PROTOCOL'], {}), '(d, pickle.HIGHEST_PROTOCOL)\n', (7598, 7626), False, 'import pickle\n'), ((7644, 7667), 'pickle.loads', 'pickle.loads', (['d_pickled'], {}), '(d_pickled)\n', (7656, 7667), False, 'import pickle\n'), ((7819, 7866), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2012-09-19T11:59:05+10:00"""'], {}), "('2012-09-19T11:59:05+10:00')\n", (7837, 7866), False, 'import iso8601\n'), ((7887, 7902), 'pickle.dumps', 'pickle.dumps', (['d'], {}), '(d)\n', (7899, 7902), False, 'import pickle\n'), ((7920, 7943), 'pickle.loads', 'pickle.loads', (['d_pickled'], {}), '(d_pickled)\n', (7932, 7943), False, 'import pickle\n'), ((8111, 8158), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2012-09-19T11:59:05+10:00"""'], {}), "('2012-09-19T11:59:05+10:00')\n", (8129, 8158), False, 'import iso8601\n'), ((8179, 8219), 'pickle.dumps', 'pickle.dumps', (['d', 'pickle.HIGHEST_PROTOCOL'], {}), '(d, pickle.HIGHEST_PROTOCOL)\n', (8191, 8219), False, 'import pickle\n'), ((8237, 8260), 'pickle.loads', 'pickle.loads', (['d_pickled'], {}), '(d_pickled)\n', (8249, 8260), False, 'import pickle\n'), ((8333, 8362), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2012-12"""'], {}), "('2012-12')\n", (8351, 8362), True, 'import iso8601\n'), ((8653, 8679), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2012"""'], {}), "('2012')\n", (8671, 8679), True, 'import iso8601\n')]
#!/usr/bin/env python
# coding: utf-8
# load data and preweighting iteratively and return:
# 1) phi1 and phi2
# 2) dilute fraction (check that the fitting worked)
# 3) surface tension (test for separation)
# 4) mu*
# 5) generate picture of reweighted histogram as a check
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import scipy.optimize
from scipy import stats
from sklearn import mixture
# convention:
# beta0=temp of the simulation from which the current weighting was extracted (not meaningful for uniform)
# beta1=temp of the actual simulation being analyzed
# betaNew=temp of the simulation we want to do next, after reweighting
# In[2]:
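# w(N, E) = exp(-(beta1-beta0)*E + (beta1*mu1-beta0*mu0)*N) * eta(N), where eta is the
# multicanonical preweighting; this reweights samples taken at (beta0, mu0) to (beta1, mu1)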
def getReweightedData(data,beta0,mu0,beta1,mu1,nWeighting):
dataCopy=data.copy()
weightArray=np.array([nWeighting[i] for i in data['N']])
weights=-(beta1-beta0)*data['E']+(beta1*mu1-beta0*mu0)*data['N']+np.log(weightArray)
dataCopy['w']=np.exp(weights)
return dataCopy
#generate data following distribution but without carrying around weighting
def getUnweightedData(data):
binWidth=5
binEdges=np.arange(np.min(data['N'])-binWidth/2,np.max(data['N'])+binWidth/2,binWidth)
hist = np.histogram(data['N'],weights=data['w'], density=True,bins=binEdges)
countVector=binEdges[1:]-binWidth/2
# #use date directly
# unweightedPMF=stats.rv_discrete( values=(countVector, hist))
# newData=unweightedPMF.rvs(size=30000)
#fit PDF
hist_dist = scipy.stats.rv_histogram(hist)
newData=hist_dist.rvs(size=200000)
return newData
# In[3]:
# #given an empirical histogram, find the deltaMu which gives equal weights to peaks. Return the deltaMu and shifted histogram
#basic form of PDF
def gaussian(x,mu,sigma):
prefactor=1/(sigma*np.sqrt(2*np.pi))
exponential=np.exp(-(((x-mu)/sigma)**2)/2)
return prefactor*exponential
#a and b are weights
def doubleGaussian(x,mu1,sigma1,mu2,sigma2,a):
b=1-a
return a*gaussian(x,mu1,sigma1)+b*gaussian(x,mu2,sigma2)
#given empirical pdf, fit to double gaussian. returns optimized parameters
def fitPDF(empiricalPDF):
guess=[300,100,700,100,0.5]
xData=list(empiricalPDF.keys())
yData=list(empiricalPDF.values())
params,cov=scipy.optimize.curve_fit(doubleGaussian,xData,yData,guess)
return params
def getGMM(data):
unweightedData=getUnweightedData(data)
unweightedData=unweightedData.reshape(-1, 1)
# fit a Gaussian Mixture Model with two components
clf = mixture.GaussianMixture(n_components=2, covariance_type='full')
clf.fit(unweightedData)
return clf
def getDiluteFraction(data):
#get pdf from data
# binEdges=np.arange(np.min(data['N'])-0.5,np.max(data['N'])+0.5,1)
# hist,_ = np.histogram(data['N'],weights=data['w'], density=True,bins=binEdges)
# countVector=binEdges[1:]-0.5
# dataPDF=dict(zip(countVector, hist))
# fitParams=fitPDF(dataPDF)
# diluteFraction=fitParams[-1]
#Gaussian mixture model
thisGMM=getGMM(data)
means=thisGMM.means_
means=[means[0,0],means[1,0]]
weights=thisGMM.weights_
diluteFraction=weights[np.argmin(means)]
return diluteFraction
# In[4]:
#define a function of trial mu1 whose root gives equal dilute/dense weights (returns dilute fraction - 0.5)
def reweightedDiluteFraction(trialMu1,*paramTuple):
#unpack the tuple containing parameters
data,beta0,mu0,beta1=paramTuple
reweightedData=getReweightedData(data,beta0,mu0,beta1,trialMu1,multicanonicalWeighting)
diluteFraction=getDiluteFraction(reweightedData)
return diluteFraction-0.5
def getEqualWeights(data,beta0,mu0,beta1):
# solTuple=mu1=scipy.optimize.fsolve(reweightedDiluteFraction,mu0,args=(data,beta0,mu0,beta1),maxfev=1000,full_output=1)
# mu1=solTuple[0]
# mu1=mu1[0]
# print(solTuple)
leftEdge=mu0-0.01
rightEdge=mu0+0.01
brentCondition=False
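    # widen the bracket until f(leftEdge) and f(rightEdge) differ in sign, since brentq requires a sign change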
    while not brentCondition:
leftValue=reweightedDiluteFraction(leftEdge,data,beta0,mu0,beta1)
rightValue=reweightedDiluteFraction(rightEdge,data,beta0,mu0,beta1)
if np.sign(leftValue)==np.sign(rightValue):
if leftValue<0:
leftEdge=leftEdge-0.01
if rightValue>0:
rightEdge=rightEdge+0.01
else:
brentCondition=True
mu1=scipy.optimize.brentq(reweightedDiluteFraction,leftEdge,rightEdge,args=(data,beta0,mu0,beta1),xtol=0.0001,maxiter=5000)
solutionData=getReweightedData(data,beta0,mu0,beta1,mu1,multicanonicalWeighting)
return (mu1,solutionData)
def getMeans(data):
#get pdf from data
# binEdges=np.arange(np.min(data['N'])-0.5,np.max(data['N'])+0.5,1)
# hist,_ = np.histogram(data['N'],weights=data['w'], density=True,bins=binEdges)
# countVector=binEdges[1:]-0.5
# dataPDF=dict(zip(countVector, hist))
# fitParams=fitPDF(dataPDF)
# phi1,phi2=fitParams[1],fitParams[3]
#Gaussian mixture model
thisGMM=getGMM(data)
means=thisGMM.means_
means=np.array([means[0,0],means[1,0]])
means=np.sort(means)
phi1=means[0]*length/volume
phi2=means[1]*length/volume
return phi1,phi2
# In[5]:
def getSurfaceTension(data,binSize):
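    # estimate the interface free energy from the peak-to-valley ratio of P(N):
    # surfaceTension = ln(pMax/pMin) / (2*beta1*linearD^2), with pMax the mean of the two peak heights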
tensionHist,edges=np.histogram(data['N'],weights=data['w'],bins=np.arange(0,1000,binSize),density=True)
prelimThreshold=500
#find modes
thresholdIndex=np.argwhere(edges>prelimThreshold)[0,0]
mode1Index=np.argmax(tensionHist[0:thresholdIndex])
mode2Index=np.argmax(tensionHist[thresholdIndex:])+thresholdIndex
mode1=tensionHist[mode1Index]
mode2=tensionHist[mode2Index]
pMax=(mode1+mode2)/2
#find min between modes
pMin=np.min(tensionHist[mode1Index:mode2Index])
prefactor=1/(2*beta1*(linearD**2))
surfaceTension=prefactor*np.log(pMax/pMin)
return surfaceTension
# In[6]:
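# keep only the samples recorded after the run reached finalBeta, then drop the first 20% of those as burn-in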
def getThermalizedDataframe(data,finalBeta):
#find step where we reach final temp
cooledStep=0
cooledTuple=np.nonzero((data.iloc[:,1]==finalBeta).values)
    if len(cooledTuple[0]) > 0:
cooledStep=cooledTuple[0][0]
#thermalize
thermalizedData=data.iloc[cooledStep:]
thermalizedData=thermalizedData.iloc[int(len(thermalizedData)/5):]
    return thermalizedData
# In[7]:
def getBetaPairs(seqArg):
newSeqArg=seqArg+'_'
betaPairs=[]
allFiles=os.listdir(path)
for thisFile in allFiles:
if thisFile.find(newSeqArg)!=-1 and thisFile.find('preweighting')!=-1:
loc1=thisFile.find('beta0')+6
loc2=thisFile.find('beta1')-1
loc3=thisFile.find('beta1')+6
loc4=thisFile.find('.txt')
thisBeta0=thisFile[loc1:loc2]
thisBeta1=thisFile[loc3:loc4]
betaPairs.append([float(thisBeta0),float(thisBeta1)])
return betaPairs
# In[8]:
#specify sequence and temperatures
linearD=30
volume=linearD**3
j=0.05
# numReps=3
numReps=3
seq='L24_b2'
length=24
path='/rawData/'
betaPairList=getBetaPairs(seq)
#beta1, phi1 mean, phi1 SD, phi2 mean, phi2 SD
dataSummary=np.zeros([len(betaPairList),5])
plotting=False
# In[24]:
#load data and process
phaseLine=np.zeros([len(betaPairList),3]) #T mu list
for counter,betaPair in enumerate(betaPairList):
beta0,beta1=betaPair
phi1Reps=[]
phi2Reps=[]
muStarReps=[]
for repNumber in range(numReps):
filename='writeRecord_multicanonical_'+seq+'_beta0_'+str(beta0)+'_beta1_'+str(beta1)+'_j'+str(j)+'_rep'+str(repNumber)+'.dat'
weightingFilename='preweighting_'+seq+'_beta0_'+str(beta0)+'_beta1_'+str(beta1)+'.txt'
#load
thisData=pd.read_csv(path+filename,sep=' ',header=None,names=['steps','beta','N','E'],index_col=False)
#thermalize
thermalizedData=getThermalizedDataframe(thisData,beta1)
#load multicanonical weighting data. It will be just -1: -1 for uniform weighting
multicanonicalWeightingArray=np.loadtxt(path+weightingFilename,skiprows=9,delimiter=' ')
multicanonicalWeighting={}
for i in range(multicanonicalWeightingArray.shape[0]):
multicanonicalWeighting[int(multicanonicalWeightingArray[i,0])]=multicanonicalWeightingArray[i,1]
#load mu1 from preweighting file
weightingFileStream=open(path+weightingFilename,"r")
weightingLines=weightingFileStream.readlines()
mu1=float(weightingLines[7])
#remove eta
dataGCE=getReweightedData(thermalizedData,beta1,mu1,beta1,mu1,multicanonicalWeighting)
# #reweight to equal weights
beta1MuCritical,beta1Sol=getEqualWeights(dataGCE,beta1,mu1,beta1)
muStarReps.append(beta1MuCritical)
#get phi overlap, surface tension
phi1,phi2=getMeans(beta1Sol)
diluteFraction=getDiluteFraction(beta1Sol)
surfaceTension=getSurfaceTension(beta1Sol,10)
phi1Reps.append(phi1)
phi2Reps.append(phi2)
phi1_mean=np.mean(phi1Reps)
phi1_sd=np.std(phi1Reps)
phi2_mean=np.mean(phi2Reps)
phi2_sd=np.std(phi2Reps)
dataSummary[counter]=np.array([beta1,phi1_mean,phi1_sd,phi2_mean,phi2_sd])
muStar_mean=np.mean(muStarReps)
muStar_std=np.std(muStarReps)
phaseLine[counter]=np.array([muStar_mean,muStar_std,1/beta1])
# print(surfaceTension)
#export plots
bins=np.arange(0,1000,5)
if plotting:
plt.figure()
plt.hist(thermalizedData['N'],bins=bins,density=True)
plt.xlabel('$N$')
plt.ylabel('$P(N)$')
plt.title(seq+', $\\beta=$'+str(beta1)+', $\\tilde{H}$')
# plt.savefig(seq+'_beta1_'+str(beta1)+'_eta.png')
plt.figure()
plt.hist(dataGCE['N'],weights=dataGCE['w'],bins=bins,density=True)
plt.xlabel('$N$')
plt.ylabel('$P(N)$')
plt.title(seq+', $\\beta=$'+str(beta1)+', $H$')
# plt.savefig(seq+'_beta1_'+str(beta1)+'_noEta.png')
plt.figure()
plt.hist(beta1Sol['N'],weights=beta1Sol['w'],bins=bins,density=True)
plt.xlabel('$N$')
plt.ylabel('$P(N)$')
plt.title(seq+', $\\beta=$'+str(beta1)+', equal weights'+'\n'+'$\\phi_1=$'+str(round(phi1,3))+', $\\phi_2=$'+str(round(phi2,3)))
# plt.savefig(seq+'_beta1_'+str(beta1)+'_reweighted.png')
# In[44]:
#for ell=2:
criticalPoint=np.array([-10.488682202333335,1.0888366012595907])
plt.figure(figsize=(8,6))
plt.errorbar(phaseLine[:,0],phaseLine[:,2],yerr=None,xerr=phaseLine[:,1],markersize=12,marker='.',linestyle='-')
plt.plot(criticalPoint[0],criticalPoint[1],marker='*',markersize=15)
plt.xlim(-10.65,-10.43)
plt.ylim(1.061,1.095)
plt.xlabel('Chemical potential $\\mu$ $(\\epsilon)$',fontsize=22)
plt.ylabel('Temperature $T$ $(\\epsilon/k_\\mathrm{B})$',fontsize=22)
plt.tick_params(axis='both',labelsize=18)
# plt.savefig('phaseLine.svg',bbox_inches = "tight")
# In[ ]:
np.save('phaseData_'+seq+'_j'+str(j)+'.npy',dataSummary)
# In[ ]:
#example of multicanonical weighting for methods
if seq=='L24_b2':
beta0,beta1=betaPairList[-1]
phi1Reps=[]
phi2Reps=[]
for repNumber in range(numReps):
filename='writeRecord_multicanonical_'+seq+'_beta0_'+str(beta0)+'_beta1_'+str(beta1)+'_j'+str(j)+'_rep'+str(repNumber)+'.dat'
weightingFilename='preweighting_'+seq+'_beta0_'+str(beta0)+'_beta1_'+str(beta1)+'.txt'
#load
thisData=pd.read_csv(path+filename,sep=' ',header=None,names=['steps','beta','N','E'],index_col=False)
#thermalize
thermalizedData=getThermalizedDataframe(thisData,beta1)
#load multicanonical weighting data. It will be just -1: -1 for uniform weighting
multicanonicalWeightingArray=np.loadtxt(path+weightingFilename,skiprows=9,delimiter=' ')
multicanonicalWeighting={}
for i in range(multicanonicalWeightingArray.shape[0]):
multicanonicalWeighting[int(multicanonicalWeightingArray[i,0])]=multicanonicalWeightingArray[i,1]
#load mu1 from preweighting file
weightingFileStream=open(path+weightingFilename,"r")
weightingLines=weightingFileStream.readlines()
mu1=float(weightingLines[7])
#remove eta
dataGCE=getReweightedData(thermalizedData,beta1,mu1,beta1,mu1,multicanonicalWeighting)
# #reweight to equal weights
beta1MuCritical,beta1Sol=getEqualWeights(dataGCE,beta1,mu1,beta1)
#get phi overlap, surface tension
phi1,phi2=getMeans(beta1Sol)
diluteFraction=getDiluteFraction(beta1Sol)
surfaceTension=getSurfaceTension(beta1Sol,10)
phi1Reps.append(phi1)
phi2Reps.append(phi2)
phi1_mean=np.mean(phi1Reps)
phi1_sd=np.std(phi1Reps)
phi2_mean=np.mean(phi2Reps)
phi2_sd=np.std(phi2Reps)
dataSummary[counter]=np.array([beta1,phi1_mean,phi1_sd,phi2_mean,phi2_sd])
# print(surfaceTension)
yMax=0.00925
figSize=[8,6]
labelSize=22
tickSize=18
#export plots
bins=np.arange(0,1100,5)
if plotting:
plt.figure(figsize=(figSize[0],figSize[1]))
plt.hist(thermalizedData['N'],bins=bins,density=True)
plt.xlabel('Polymer number $N$',fontsize=labelSize)
plt.ylabel('Probability $\\tilde{P}(N)$',fontsize=labelSize)
plt.ylim(ymax=yMax)
plt.tick_params(axis='both',labelsize=tickSize)
plt.tight_layout()
plt.savefig('methods_multicanonical_'+seq+'_beta1_'+str(beta1)+'_raw.svg')
plt.figure(figsize=(figSize[0],figSize[1]))
plt.hist(dataGCE['N'],weights=dataGCE['w'],bins=bins,density=True)
plt.xlabel('Polymer number $N$',fontsize=labelSize)
plt.ylabel('Probability $P(N)$',fontsize=labelSize)
plt.ylim(ymax=yMax)
plt.tick_params(axis='both',labelsize=tickSize)
plt.tight_layout()
# plt.savefig('methods_multicanonical_'+seq+'_beta1_'+str(beta1)+'_noEta.svg')
plt.figure(figsize=(figSize[0],figSize[1]))
plt.hist(beta1Sol['N'],weights=beta1Sol['w'],bins=bins,density=True)
plt.xlabel('Polymer number $N$',fontsize=labelSize)
plt.ylabel('Probability $P(N)$',fontsize=labelSize)
plt.ylim(ymax=yMax)
plt.tick_params(axis='both',labelsize=tickSize)
plt.tight_layout()
# plt.savefig('methods_multicanonical_'+seq+'_beta1_'+str(beta1)+'_reweighted.svg')
# In[ ]:
| [
"matplotlib.pyplot.hist",
"numpy.sqrt",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.log",
"numpy.array",
"matplotlib.pyplot.errorbar",
"numpy.arange",
"numpy.mean",
"numpy.histogram",
"os.listdir",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.sort",
"numpy.max",
"numpy.exp",
"numpy.min",
"numpy.argmin",
"matplotlib.pyplot.ylim",
"sklearn.mixture.GaussianMixture",
"matplotlib.pyplot.tick_params",
"numpy.argmax",
"numpy.sign",
"numpy.nonzero",
"numpy.std",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.figure",
"numpy.argwhere",
"matplotlib.pyplot.tight_layout",
"numpy.loadtxt"
] | [((10634, 10685), 'numpy.array', 'np.array', (['[-10.488682202333335, 1.0888366012595907]'], {}), '([-10.488682202333335, 1.0888366012595907])\n', (10642, 10685), True, 'import numpy as np\n'), ((10686, 10712), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (10696, 10712), True, 'import matplotlib.pyplot as plt\n'), ((10712, 10837), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['phaseLine[:, 0]', 'phaseLine[:, 2]'], {'yerr': 'None', 'xerr': 'phaseLine[:, 1]', 'markersize': '(12)', 'marker': '"""."""', 'linestyle': '"""-"""'}), "(phaseLine[:, 0], phaseLine[:, 2], yerr=None, xerr=phaseLine[:,\n 1], markersize=12, marker='.', linestyle='-')\n", (10724, 10837), True, 'import matplotlib.pyplot as plt\n'), ((10825, 10896), 'matplotlib.pyplot.plot', 'plt.plot', (['criticalPoint[0]', 'criticalPoint[1]'], {'marker': '"""*"""', 'markersize': '(15)'}), "(criticalPoint[0], criticalPoint[1], marker='*', markersize=15)\n", (10833, 10896), True, 'import matplotlib.pyplot as plt\n'), ((10894, 10918), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-10.65)', '(-10.43)'], {}), '(-10.65, -10.43)\n', (10902, 10918), True, 'import matplotlib.pyplot as plt\n'), ((10918, 10940), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(1.061)', '(1.095)'], {}), '(1.061, 1.095)\n', (10926, 10940), True, 'import matplotlib.pyplot as plt\n'), ((10940, 11006), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Chemical potential $\\\\mu$ $(\\\\epsilon)$"""'], {'fontsize': '(22)'}), "('Chemical potential $\\\\mu$ $(\\\\epsilon)$', fontsize=22)\n", (10950, 11006), True, 'import matplotlib.pyplot as plt\n'), ((11006, 11076), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Temperature $T$ $(\\\\epsilon/k_\\\\mathrm{B}$)"""'], {'fontsize': '(22)'}), "('Temperature $T$ $(\\\\epsilon/k_\\\\mathrm{B}$)', fontsize=22)\n", (11016, 11076), True, 'import matplotlib.pyplot as plt\n'), ((11075, 11117), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'labelsize': '(18)'}), "(axis='both', labelsize=18)\n", (11090, 11117), True, 'import matplotlib.pyplot as plt\n'), ((827, 871), 'numpy.array', 'np.array', (["[nWeighting[i] for i in data['N']]"], {}), "([nWeighting[i] for i in data['N']])\n", (835, 871), True, 'import numpy as np\n'), ((1003, 1018), 'numpy.exp', 'np.exp', (['weights'], {}), '(weights)\n', (1009, 1018), True, 'import numpy as np\n'), ((1272, 1343), 'numpy.histogram', 'np.histogram', (["data['N']"], {'weights': "data['w']", 'density': '(True)', 'bins': 'binEdges'}), "(data['N'], weights=data['w'], density=True, bins=binEdges)\n", (1284, 1343), True, 'import numpy as np\n'), ((1893, 1929), 'numpy.exp', 'np.exp', (['(-((x - mu) / sigma) ** 2 / 2)'], {}), '(-((x - mu) / sigma) ** 2 / 2)\n', (1899, 1929), True, 'import numpy as np\n'), ((2578, 2641), 'sklearn.mixture.GaussianMixture', 'mixture.GaussianMixture', ([], {'n_components': '(2)', 'covariance_type': '"""full"""'}), "(n_components=2, covariance_type='full')\n", (2601, 2641), False, 'from sklearn import mixture\n'), ((5203, 5239), 'numpy.array', 'np.array', (['[means[0, 0], means[1, 0]]'], {}), '([means[0, 0], means[1, 0]])\n', (5211, 5239), True, 'import numpy as np\n'), ((5247, 5261), 'numpy.sort', 'np.sort', (['means'], {}), '(means)\n', (5254, 5261), True, 'import numpy as np\n'), ((5663, 5703), 'numpy.argmax', 'np.argmax', (['tensionHist[0:thresholdIndex]'], {}), '(tensionHist[0:thresholdIndex])\n', (5672, 5703), True, 'import numpy as np\n'), ((5919, 5961), 'numpy.min', 'np.min', (['tensionHist[mode1Index:mode2Index]'], {}), '(tensionHist[mode1Index:mode2Index])\n', (5925, 5961), True, 'import numpy as np\n'), ((6221, 6270), 'numpy.nonzero', 'np.nonzero', (['(data.iloc[:, 1] == finalBeta).values'], {}), '((data.iloc[:, 1] == finalBeta).values)\n', (6231, 6270), True, 'import numpy as np\n'), ((6593, 6609), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (6603, 6609), False, 'import os\n'), ((9256, 9273), 'numpy.mean', 'np.mean', (['phi1Reps'], {}), '(phi1Reps)\n', (9263, 9273), True, 'import numpy as np\n'), ((9286, 9302), 'numpy.std', 'np.std', (['phi1Reps'], {}), '(phi1Reps)\n', (9292, 9302), True, 'import numpy as np\n'), ((9322, 9339), 'numpy.mean', 'np.mean', (['phi2Reps'], {}), '(phi2Reps)\n', (9329, 9339), True, 'import numpy as np\n'), ((9352, 9368), 'numpy.std', 'np.std', (['phi2Reps'], {}), '(phi2Reps)\n', (9358, 9368), True, 'import numpy as np\n'), ((9399, 9456), 'numpy.array', 'np.array', (['[beta1, phi1_mean, phi1_sd, phi2_mean, phi2_sd]'], {}), '([beta1, phi1_mean, phi1_sd, phi2_mean, phi2_sd])\n', (9407, 9456), True, 'import numpy as np\n'), ((9479, 9498), 'numpy.mean', 'np.mean', (['muStarReps'], {}), '(muStarReps)\n', (9486, 9498), True, 'import numpy as np\n'), ((9514, 9532), 'numpy.std', 'np.std', (['muStarReps'], {}), '(muStarReps)\n', (9520, 9532), True, 'import numpy as np\n'), ((9556, 9602), 'numpy.array', 'np.array', (['[muStar_mean, muStar_std, 1 / beta1]'], {}), '([muStar_mean, muStar_std, 1 / beta1])\n', (9564, 9602), True, 'import numpy as np\n'), ((9669, 9690), 'numpy.arange', 'np.arange', (['(0)', '(1000)', '(5)'], {}), '(0, 1000, 5)\n', (9678, 9690), True, 'import numpy as np\n'), ((12949, 12966), 'numpy.mean', 'np.mean', (['phi1Reps'], {}), '(phi1Reps)\n', (12956, 12966), True, 'import numpy as np\n'), ((12979, 12995), 'numpy.std', 'np.std', (['phi1Reps'], {}), '(phi1Reps)\n', (12985, 12995), True, 'import numpy as np\n'), ((13011, 13028), 'numpy.mean', 'np.mean', (['phi2Reps'], {}), '(phi2Reps)\n', (13018, 13028), True, 'import numpy as np\n'), ((13041, 13057), 'numpy.std', 'np.std', (['phi2Reps'], {}), '(phi2Reps)\n', (13047, 13057), True, 'import numpy as np\n'), ((13084, 13141), 'numpy.array', 'np.array', (['[beta1, phi1_mean, phi1_sd, phi2_mean, phi2_sd]'], {}), '([beta1, phi1_mean, phi1_sd, phi2_mean, phi2_sd])\n', (13092, 13141), True, 'import numpy as np\n'), ((13273, 13294), 'numpy.arange', 'np.arange', (['(0)', '(1100)', '(5)'], {}), '(0, 1100, 5)\n', (13282, 13294), True, 'import numpy as np\n'), ((960, 979), 'numpy.log', 'np.log', (['weightArray'], {}), '(weightArray)\n', (966, 979), True, 'import numpy as np\n'), ((3241, 3257), 'numpy.argmin', 'np.argmin', (['means'], {}), '(means)\n', (3250, 3257), True, 'import numpy as np\n'), ((5598, 5634), 'numpy.argwhere', 'np.argwhere', (['(edges > prelimThreshold)'], {}), '(edges > prelimThreshold)\n', (5609, 5634), True, 'import numpy as np\n'), ((5719, 5758), 'numpy.argmax', 'np.argmax', (['tensionHist[thresholdIndex:]'], {}), '(tensionHist[thresholdIndex:])\n', (5728, 5758), True, 'import numpy as np\n'), ((6040, 6059), 'numpy.log', 'np.log', (['(pMax / pMin)'], {}), '(pMax / pMin)\n', (6046, 6059), True, 'import numpy as np\n'), ((7913, 8019), 'pandas.read_csv', 'pd.read_csv', (['(path + filename)'], {'sep': '""" """', 'header': 'None', 'names': "['steps', 'beta', 'N', 'E']", 'index_col': '(False)'}), "(path + filename, sep=' ', header=None, names=['steps', 'beta',\n 'N', 'E'], index_col=False)\n", (7924, 8019), True, 'import pandas as pd\n'), ((8221, 8284), 'numpy.loadtxt', 'np.loadtxt', (['(path + weightingFilename)'], {'skiprows': '(9)', 'delimiter': '""" """'}), "(path + weightingFilename, skiprows=9, delimiter=' ')\n", (8231, 8284), True, 'import numpy as np\n'), ((9714, 9726), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9724, 9726), True, 'import matplotlib.pyplot as plt\n'), ((9735, 9790), 'matplotlib.pyplot.hist', 'plt.hist', (["thermalizedData['N']"], {'bins': 'bins', 'density': '(True)'}), "(thermalizedData['N'], bins=bins, density=True)\n", (9743, 9790), True, 'import matplotlib.pyplot as plt\n'), ((9797, 9814), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$N$"""'], {}), "('$N$')\n", (9807, 9814), True, 'import matplotlib.pyplot as plt\n'), ((9823, 9843), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$P(N)$"""'], {}), "('$P(N)$')\n", (9833, 9843), True, 'import matplotlib.pyplot as plt\n'), ((9977, 9989), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9987, 9989), True, 'import matplotlib.pyplot as plt\n'), ((9998, 10067), 'matplotlib.pyplot.hist', 'plt.hist', (["dataGCE['N']"], {'weights': "dataGCE['w']", 'bins': 'bins', 'density': '(True)'}), "(dataGCE['N'], weights=dataGCE['w'], bins=bins, density=True)\n", (10006, 10067), True, 'import matplotlib.pyplot as plt\n'), ((10073, 10090), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$N$"""'], {}), "('$N$')\n", (10083, 10090), True, 'import matplotlib.pyplot as plt\n'), ((10099, 10119), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$P(N)$"""'], {}), "('$P(N)$')\n", (10109, 10119), True, 'import matplotlib.pyplot as plt\n'), ((10246, 10258), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10256, 10258), True, 'import matplotlib.pyplot as plt\n'), ((10267, 10338), 'matplotlib.pyplot.hist', 'plt.hist', (["beta1Sol['N']"], {'weights': "beta1Sol['w']", 'bins': 'bins', 'density': '(True)'}), "(beta1Sol['N'], weights=beta1Sol['w'], bins=bins, density=True)\n", (10275, 10338), True, 'import matplotlib.pyplot as plt\n'), ((10344, 10361), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$N$"""'], {}), "('$N$')\n", (10354, 10361), True, 'import matplotlib.pyplot as plt\n'), ((10370, 10390), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$P(N)$"""'], {}), "('$P(N)$')\n", (10380, 10390), True, 'import matplotlib.pyplot as plt\n'), ((11686, 11792), 'pandas.read_csv', 'pd.read_csv', (['(path + filename)'], {'sep': '""" """', 'header': 'None', 'names': "['steps', 'beta', 'N', 'E']", 'index_col': '(False)'}), "(path + filename, sep=' ', header=None, names=['steps', 'beta',\n 'N', 'E'], index_col=False)\n", (11697, 11792), True, 'import pandas as pd\n'), ((11994, 12057), 'numpy.loadtxt', 'np.loadtxt', (['(path + weightingFilename)'], {'skiprows': '(9)', 'delimiter': '""" """'}), "(path + weightingFilename, skiprows=9, delimiter=' ')\n", (12004, 12057), True, 'import numpy as np\n'), ((13318, 13362), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(figSize[0], figSize[1])'}), '(figsize=(figSize[0], figSize[1]))\n', (13328, 13362), True, 'import matplotlib.pyplot as plt\n'), ((13370, 13425), 'matplotlib.pyplot.hist', 'plt.hist', (["thermalizedData['N']"], {'bins': 'bins', 'density': '(True)'}), "(thermalizedData['N'], bins=bins, density=True)\n", (13378, 13425), True, 'import matplotlib.pyplot as plt\n'), ((13432, 13484), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Polymer number $N$"""'], {'fontsize': 'labelSize'}), "('Polymer number $N$', fontsize=labelSize)\n", (13442, 13484), True, 'import matplotlib.pyplot as plt\n'), ((13492, 13553), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability $\\\\tilde{P}(N)$"""'], {'fontsize': 'labelSize'}), "('Probability $\\\\tilde{P}(N)$', fontsize=labelSize)\n", (13502, 13553), True, 'import matplotlib.pyplot as plt\n'), ((13561, 13580), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymax': 'yMax'}), '(ymax=yMax)\n', (13569, 13580), True, 'import matplotlib.pyplot as plt\n'), ((13589, 13637), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'labelsize': 'tickSize'}), "(axis='both', labelsize=tickSize)\n", (13604, 13637), True, 'import matplotlib.pyplot as plt\n'), ((13645, 13663), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13661, 13663), True, 'import matplotlib.pyplot as plt\n'), ((13758, 13802), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(figSize[0], figSize[1])'}), '(figsize=(figSize[0], figSize[1]))\n', (13768, 13802), True, 'import matplotlib.pyplot as plt\n'), ((13810, 13879), 'matplotlib.pyplot.hist', 'plt.hist', (["dataGCE['N']"], {'weights': "dataGCE['w']", 'bins': 'bins', 'density': '(True)'}), "(dataGCE['N'], weights=dataGCE['w'], bins=bins, density=True)\n", (13818, 13879), True, 'import matplotlib.pyplot as plt\n'), ((13885, 13937), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Polymer number $N$"""'], {'fontsize': 'labelSize'}), "('Polymer number $N$', fontsize=labelSize)\n", (13895, 13937), True, 'import matplotlib.pyplot as plt\n'), ((13945, 13997), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability $P(N)$"""'], {'fontsize': 'labelSize'}), "('Probability $P(N)$', fontsize=labelSize)\n", (13955, 13997), True, 'import matplotlib.pyplot as plt\n'), ((14005, 14024), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymax': 'yMax'}), '(ymax=yMax)\n', (14013, 14024), True, 'import matplotlib.pyplot as plt\n'), ((14033, 14081), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'labelsize': 'tickSize'}), "(axis='both', labelsize=tickSize)\n", (14048, 14081), True, 'import matplotlib.pyplot as plt\n'), ((14089, 14107), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14105, 14107), True, 'import matplotlib.pyplot as plt\n'), ((14205, 14249), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(figSize[0], figSize[1])'}), '(figsize=(figSize[0], figSize[1]))\n', (14215, 14249), True, 'import matplotlib.pyplot as plt\n'), ((14257, 14328), 'matplotlib.pyplot.hist', 'plt.hist', (["beta1Sol['N']"], {'weights': "beta1Sol['w']", 'bins': 'bins', 'density': '(True)'}), "(beta1Sol['N'], weights=beta1Sol['w'], bins=bins, density=True)\n", (14265, 14328), True, 'import matplotlib.pyplot as plt\n'), ((14334, 14386), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Polymer number $N$"""'], {'fontsize': 'labelSize'}), "('Polymer number $N$', fontsize=labelSize)\n", (14344, 14386), True, 'import matplotlib.pyplot as plt\n'), ((14394, 14446), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability $P(N)$"""'], {'fontsize': 'labelSize'}), "('Probability $P(N)$', fontsize=labelSize)\n", (14404, 14446), True, 'import matplotlib.pyplot as plt\n'), ((14454, 14473), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymax': 'yMax'}), '(ymax=yMax)\n', (14462, 14473), True, 'import matplotlib.pyplot as plt\n'), ((14482, 14530), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'labelsize': 'tickSize'}), "(axis='both', labelsize=tickSize)\n", (14497, 14530), True, 'import matplotlib.pyplot as plt\n'), ((14538, 14556), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14554, 14556), True, 'import matplotlib.pyplot as plt\n'), ((1193, 1210), 'numpy.min', 'np.min', (["data['N']"], {}), "(data['N'])\n", (1199, 1210), True, 'import numpy as np\n'), ((1222, 1239), 'numpy.max', 'np.max', (["data['N']"], {}), "(data['N'])\n", (1228, 1239), True, 'import numpy as np\n'), ((1859, 1877), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1866, 1877), True, 'import numpy as np\n'), ((4196, 4214), 'numpy.sign', 'np.sign', (['leftValue'], {}), '(leftValue)\n', (4203, 4214), True, 'import numpy as np\n'), ((4216, 4235), 'numpy.sign', 'np.sign', (['rightValue'], {}), '(rightValue)\n', (4223, 4235), True, 'import numpy as np\n'), ((5489, 5516), 'numpy.arange', 'np.arange', (['(0)', '(1000)', 'binSize'], {}), '(0, 1000, binSize)\n', (5498, 5516), True, 'import numpy as np\n')]
from django.test import SimpleTestCase
from data_schema import FieldSchemaType
from data_schema.convert_value import convert_value
class BooleanConverterTest(SimpleTestCase):
def test_convert_value_true(self):
"""
Verifies true string values are True
"""
self.assertTrue(convert_value(FieldSchemaType.BOOLEAN, 't'))
self.assertTrue(convert_value(FieldSchemaType.BOOLEAN, 'T'))
self.assertTrue(convert_value(FieldSchemaType.BOOLEAN, 'true'))
self.assertTrue(convert_value(FieldSchemaType.BOOLEAN, 'True'))
self.assertTrue(convert_value(FieldSchemaType.BOOLEAN, 'TRUE'))
self.assertTrue(convert_value(FieldSchemaType.BOOLEAN, True))
self.assertTrue(convert_value(FieldSchemaType.BOOLEAN, 1))
self.assertTrue(convert_value(FieldSchemaType.BOOLEAN, '1'))
def test_convert_value_false(self):
"""
Verifies false string values are False
"""
self.assertFalse(convert_value(FieldSchemaType.BOOLEAN, 'f'))
self.assertFalse(convert_value(FieldSchemaType.BOOLEAN, 'F'))
self.assertFalse(convert_value(FieldSchemaType.BOOLEAN, 'false'))
self.assertFalse(convert_value(FieldSchemaType.BOOLEAN, 'False'))
self.assertFalse(convert_value(FieldSchemaType.BOOLEAN, 'FALSE'))
self.assertFalse(convert_value(FieldSchemaType.BOOLEAN, False))
self.assertFalse(convert_value(FieldSchemaType.BOOLEAN, 0))
self.assertFalse(convert_value(FieldSchemaType.BOOLEAN, '0'))
def test_convert_value_empty(self):
"""
Verifies that any other value returns None
"""
self.assertIsNone(convert_value(FieldSchemaType.BOOLEAN, None))
self.assertIsNone(convert_value(FieldSchemaType.BOOLEAN, ''))
self.assertIsNone(convert_value(FieldSchemaType.BOOLEAN, 'string'))
self.assertIsNone(convert_value(FieldSchemaType.BOOLEAN, 5))
def test_convert_value_default(self):
"""
        Verifies that the default value is used when the passed value is None; an invalid value still yields None
"""
self.assertTrue(convert_value(FieldSchemaType.BOOLEAN, None, default_value=True))
self.assertIsNone(convert_value(FieldSchemaType.BOOLEAN, 'invalid', default_value=True))
| [
"data_schema.convert_value.convert_value"
] | [((311, 354), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '"""t"""'], {}), "(FieldSchemaType.BOOLEAN, 't')\n", (324, 354), False, 'from data_schema.convert_value import convert_value\n'), ((380, 423), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '"""T"""'], {}), "(FieldSchemaType.BOOLEAN, 'T')\n", (393, 423), False, 'from data_schema.convert_value import convert_value\n'), ((449, 495), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '"""true"""'], {}), "(FieldSchemaType.BOOLEAN, 'true')\n", (462, 495), False, 'from data_schema.convert_value import convert_value\n'), ((521, 567), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '"""True"""'], {}), "(FieldSchemaType.BOOLEAN, 'True')\n", (534, 567), False, 'from data_schema.convert_value import convert_value\n'), ((593, 639), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '"""TRUE"""'], {}), "(FieldSchemaType.BOOLEAN, 'TRUE')\n", (606, 639), False, 'from data_schema.convert_value import convert_value\n'), ((665, 709), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '(True)'], {}), '(FieldSchemaType.BOOLEAN, True)\n', (678, 709), False, 'from data_schema.convert_value import convert_value\n'), ((735, 776), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '(1)'], {}), '(FieldSchemaType.BOOLEAN, 1)\n', (748, 776), False, 'from data_schema.convert_value import convert_value\n'), ((802, 845), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '"""1"""'], {}), "(FieldSchemaType.BOOLEAN, '1')\n", (815, 845), False, 'from data_schema.convert_value import convert_value\n'), ((984, 1027), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '"""f"""'], {}), "(FieldSchemaType.BOOLEAN, 'f')\n", (997, 1027), False, 'from data_schema.convert_value import convert_value\n'), ((1054, 1097), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '"""F"""'], {}), "(FieldSchemaType.BOOLEAN, 'F')\n", (1067, 1097), False, 'from data_schema.convert_value import convert_value\n'), ((1124, 1171), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '"""false"""'], {}), "(FieldSchemaType.BOOLEAN, 'false')\n", (1137, 1171), False, 'from data_schema.convert_value import convert_value\n'), ((1198, 1245), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '"""False"""'], {}), "(FieldSchemaType.BOOLEAN, 'False')\n", (1211, 1245), False, 'from data_schema.convert_value import convert_value\n'), ((1272, 1319), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '"""FALSE"""'], {}), "(FieldSchemaType.BOOLEAN, 'FALSE')\n", (1285, 1319), False, 'from data_schema.convert_value import convert_value\n'), ((1346, 1391), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '(False)'], {}), '(FieldSchemaType.BOOLEAN, False)\n', (1359, 1391), False, 'from data_schema.convert_value import convert_value\n'), ((1418, 1459), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '(0)'], {}), '(FieldSchemaType.BOOLEAN, 0)\n', (1431, 1459), False, 'from data_schema.convert_value import convert_value\n'), ((1486, 1529), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '"""0"""'], {}), "(FieldSchemaType.BOOLEAN, '0')\n", (1499, 1529), False, 'from data_schema.convert_value import convert_value\n'), ((1673, 1717), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', 'None'], {}), '(FieldSchemaType.BOOLEAN, None)\n', (1686, 1717), False, 'from data_schema.convert_value import convert_value\n'), ((1745, 1787), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '""""""'], {}), "(FieldSchemaType.BOOLEAN, '')\n", (1758, 1787), False, 'from data_schema.convert_value import convert_value\n'), ((1815, 1863), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '"""string"""'], {}), "(FieldSchemaType.BOOLEAN, 'string')\n", (1828, 1863), False, 'from data_schema.convert_value import convert_value\n'), ((1891, 1932), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '(5)'], {}), '(FieldSchemaType.BOOLEAN, 5)\n', (1904, 1932), False, 'from data_schema.convert_value import convert_value\n'), ((2106, 2170), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', 'None'], {'default_value': '(True)'}), '(FieldSchemaType.BOOLEAN, None, default_value=True)\n', (2119, 2170), False, 'from data_schema.convert_value import convert_value\n'), ((2198, 2267), 'data_schema.convert_value.convert_value', 'convert_value', (['FieldSchemaType.BOOLEAN', '"""invalid"""'], {'default_value': '(True)'}), "(FieldSchemaType.BOOLEAN, 'invalid', default_value=True)\n", (2211, 2267), False, 'from data_schema.convert_value import convert_value\n')]
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
res = demisto.executeCommand('addEntitlement', {
'persistent': demisto.get(demisto.args(), 'persistent'),
'replyEntriesTag': demisto.get(demisto.args(), 'replyEntriesTag')
})
if isError(res[0]):
demisto.results(res)
sys.exit(0)
entitlement = demisto.get(res[0], 'Contents')
option1 = demisto.get(demisto.args(), 'option1')
if not option1:
option1 = 'yes'
option2 = demisto.get(demisto.args(), 'option2')
if not option2:
option2 = 'no'
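# build the entitlement string: <entitlement>@<investigation id>, optionally suffixed with |<task id>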
entitlementString = entitlement + '@' + demisto.investigation()['id']
if demisto.get(demisto.args(), 'task'):
entitlementString += '|' + demisto.get(demisto.args(), 'task')
message = '%s - Please reply `%s %s` or `%s %s`' % (demisto.args()['message'],
option1,
entitlementString,
option2,
entitlementString)
demisto.results(demisto.executeCommand('send-notification', {
'to': demisto.get(demisto.args(), 'user'),
'message': message,
'ignoreAddURL': 'true',
'using-brand': 'mattermost'
}))
| [
"demistomock.results",
"demistomock.args",
"demistomock.investigation",
"demistomock.get"
] | [((352, 383), 'demistomock.get', 'demisto.get', (['res[0]', '"""Contents"""'], {}), "(res[0], 'Contents')\n", (363, 383), True, 'import demistomock as demisto\n'), ((301, 321), 'demistomock.results', 'demisto.results', (['res'], {}), '(res)\n', (316, 321), True, 'import demistomock as demisto\n'), ((407, 421), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (419, 421), True, 'import demistomock as demisto\n'), ((493, 507), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (505, 507), True, 'import demistomock as demisto\n'), ((642, 656), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (654, 656), True, 'import demistomock as demisto\n'), ((596, 619), 'demistomock.investigation', 'demisto.investigation', ([], {}), '()\n', (617, 619), True, 'import demistomock as demisto\n'), ((172, 186), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (184, 186), True, 'import demistomock as demisto\n'), ((238, 252), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (250, 252), True, 'import demistomock as demisto\n'), ((710, 724), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (722, 724), True, 'import demistomock as demisto\n'), ((786, 800), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (798, 800), True, 'import demistomock as demisto\n'), ((1162, 1176), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (1174, 1176), True, 'import demistomock as demisto\n')] |
import os.path
import os
PATH = os.path.abspath(os.path.dirname(__file__))
def relative(path):
return os.path.abspath(os.path.join(PATH, path))
BUCKET_NAME = "pollcat"
DEBUG = True
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = False
MEDIA_ROOT = relative('media')
MEDIA_URL = "/uploads/"
STATIC_ROOT = ''
STATIC_URL = 'https://pollcat.s3.amazonaws.com/'
DEVELOPMENT = False
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_QUERYSTRING_AUTH = False # Don't include auth in every url
AWS_STORAGE_BUCKET_NAME = BUCKET_NAME
EMAIL_BACKEND = 'django_ses.SESBackend'
SERVER_EMAIL = '<EMAIL>'
# django-contact-form
DEFAULT_FROM_EMAIL = '<EMAIL>'
MANAGERS = (
('Web Manager', '<EMAIL>'),
)
ADMINS = (
    ('Web Admin', '<EMAIL>'),
)
try:
from secure_settings import *
except ImportError:
pass | [
"os.path.dirname",
"os.path.join"
] | [((49, 74), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (64, 74), False, 'import os\n'), ((125, 149), 'os.path.join', 'os.path.join', (['PATH', 'path'], {}), '(PATH, path)\n', (137, 149), False, 'import os\n')] |
"""
Interface for retrieving scraped data
from bitcointalk.org
"""
import os
import psycopg2
import datetime
from isoweek import Week
from memoize import Memoizer
from sanic.log import logger
from skill.storage import Storage
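# in-memory store backing the Memoizer cache used by latest_message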
store = {}
cached = Memoizer(store)
class BitcoinTalk:
"""
Class that provides an interface for getting
scraped bitcointalk.org data from the database.
The forum data is downloaded using the
`skill-scraper`.
"""
def __init__(self):
self.uri = os.getenv('POSTGRES_URI')
self.monday_last_week = str((Week.thisweek() - 1).monday())
self.sunday_last_week = str((Week.thisweek() - 1).sunday())
def __start_to_end_date(self, start, stop):
"""
Fetches all BitcoinTalk data from within
a specified time period.
Parameters
----------
start, stop: str
Date strings in ISO format.
Returns
-------
result: list
List of messages from database.
"""
with Storage(self.uri) as S:
sql = f"""
SELECT
subject,
content_no_quote_no_html
FROM message
WHERE post_time > '{start}' AND
post_time <= '{stop}'
"""
result = S.execute(sql)
return result
def last_week(self):
"""
Fetches last week's data from the database.
        Last week is the previous ISO calendar week, Monday through Sunday.
Returns
-------
list
List of records from database.
"""
return self.__start_to_end_date(self.monday_last_week,
self.sunday_last_week)
def all(self):
"""
Fetches all records from database.
Returns
-------
result: list
Records from database.
"""
with Storage(self.uri) as S:
result = S.execute("SELECT * FROM message")
return result
def sample(self, sample_size=0.01):
"""
Get a random sample from the database using PostgreSQL's
random sampling features (SYSTEM method). Refer to
official PostgreSQL documentation here:
* https://www.postgresql.org/docs/9.6/static/tablesample-method.html
Parameters
----------
        sample_size: float
            Percentage of records to return (TABLESAMPLE SYSTEM expects 0-100).
Returns
-------
result: list
Results from database.
"""
with Storage(self.uri) as S:
sql = f"""
SELECT
subject,
link,
content_no_quote_no_html
FROM message
TABLESAMPLE SYSTEM('{sample_size}')
"""
result = S.execute(sql)
return result
@cached(max_age=60 * 60 * 12)
    def latest_message(self, coin):
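        """
        Fetches the link of the most recent message mentioning a coin.
        `coin` may be a single name or a list of names; results are
        memoized for 12 hours via the @cached decorator.
        """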
        if isinstance(coin, list):
logger.info("Input is list!")
results = []
S = Storage(self.uri)
S.open()
for c in coin:
logger.info(f"Querying for {c}")
sql = f"""
SELECT
link
FROM message
WHERE to_tsvector('english', content_no_quote_no_html) @@ to_tsquery('english', '{c}')
ORDER BY post_time DESC
LIMIT 1
"""
results.append(S.execute(sql)[0]['link'])
logger.info(f"Querying for {c} complete!")
S.close()
return results
else:
with Storage(self.uri) as S:
sql = f"""
SELECT
link
FROM message
WHERE to_tsvector('english', content_no_quote_no_html) @@ to_tsquery('english', '{coin}')
ORDER BY post_time DESC
LIMIT 1
"""
result = S.execute(sql)
return result[0]['link']
| [
"isoweek.Week.thisweek",
"os.getenv",
"sanic.log.logger.info",
"skill.storage.Storage",
"memoize.Memoizer"
] | [((248, 263), 'memoize.Memoizer', 'Memoizer', (['store'], {}), '(store)\n', (256, 263), False, 'from memoize import Memoizer\n'), ((512, 537), 'os.getenv', 'os.getenv', (['"""POSTGRES_URI"""'], {}), "('POSTGRES_URI')\n", (521, 537), False, 'import os\n'), ((1052, 1069), 'skill.storage.Storage', 'Storage', (['self.uri'], {}), '(self.uri)\n', (1059, 1069), False, 'from skill.storage import Storage\n'), ((1972, 1989), 'skill.storage.Storage', 'Storage', (['self.uri'], {}), '(self.uri)\n', (1979, 1989), False, 'from skill.storage import Storage\n'), ((2612, 2629), 'skill.storage.Storage', 'Storage', (['self.uri'], {}), '(self.uri)\n', (2619, 2629), False, 'from skill.storage import Storage\n'), ((3030, 3059), 'sanic.log.logger.info', 'logger.info', (['"""Input is list!"""'], {}), "('Input is list!')\n", (3041, 3059), False, 'from sanic.log import logger\n'), ((3101, 3118), 'skill.storage.Storage', 'Storage', (['self.uri'], {}), '(self.uri)\n', (3108, 3118), False, 'from skill.storage import Storage\n'), ((3183, 3215), 'sanic.log.logger.info', 'logger.info', (['f"""Querying for {c}"""'], {}), "(f'Querying for {c}')\n", (3194, 3215), False, 'from sanic.log import logger\n'), ((3617, 3659), 'sanic.log.logger.info', 'logger.info', (['f"""Querying for {c} complete!"""'], {}), "(f'Querying for {c} complete!')\n", (3628, 3659), False, 'from sanic.log import logger\n'), ((3749, 3766), 'skill.storage.Storage', 'Storage', (['self.uri'], {}), '(self.uri)\n', (3756, 3766), False, 'from skill.storage import Storage\n'), ((575, 590), 'isoweek.Week.thisweek', 'Week.thisweek', ([], {}), '()\n', (588, 590), False, 'from isoweek import Week\n'), ((643, 658), 'isoweek.Week.thisweek', 'Week.thisweek', ([], {}), '()\n', (656, 658), False, 'from isoweek import Week\n')] |
import platform
from pathlib import Path
from django.contrib.messages import constants as messages
from django.core.exceptions import ImproperlyConfigured
#
# Environment setup
#
VERSION = '0.2.0-dev'
# Hostname
HOSTNAME = platform.node()
# Set the base directory two levels up
BASE_DIR = Path(__file__).resolve().parent.parent
# Validate Python version
if platform.python_version_tuple() < ('3', '6'):
raise RuntimeError(
f"PyInv requires Python 3.6 or higher (current: Python {platform.python_version()})"
)
#
# Configuration import
#
# Import configuration parameters
try:
from pyinv import configuration
except ModuleNotFoundError as e:
if getattr(e, 'name') == 'configuration':
raise ImproperlyConfigured(
"Configuration file is not present. Please define pyinv/pyinv/configuration.py per the documentation."
)
raise
# Enforce required configuration parameters
for parameter in ['ALLOWED_HOSTS', 'DATABASE', 'SECRET_KEY']:
if not hasattr(configuration, parameter):
raise ImproperlyConfigured(
"Required parameter {} is missing from configuration.py.".format(parameter)
)
# Set required parameters
ALLOWED_HOSTS = getattr(configuration, 'ALLOWED_HOSTS')
DATABASE = getattr(configuration, 'DATABASE')
SECRET_KEY = getattr(configuration, 'SECRET_KEY')
# Set optional parameters
ACCOUNT_ACTIVATION_DAYS = getattr(configuration, 'ACCOUNT_ACTIVATION_DAYS', 7)
ALLOW_NON_CONTAINERS_IN_ROOT = getattr(configuration, 'ALLOW_NON_CONTAINERS_IN_ROOT', False)
ADMINS = getattr(configuration, 'ADMINS', [])
BASE_PATH = getattr(configuration, 'BASE_PATH', '')
if BASE_PATH:
BASE_PATH = BASE_PATH.strip('/') + '/' # Enforce trailing slash only
DATE_FORMAT = getattr(configuration, 'DATE_FORMAT', 'N j, Y')
DATETIME_FORMAT = getattr(configuration, 'DATETIME_FORMAT', 'N j, Y g:i a')
DEBUG = getattr(configuration, 'DEBUG', False)
EMAIL = getattr(configuration, 'EMAIL', {})
NEW_ASSET_CODE_TYPE = getattr(configuration, 'NEW_ASSET_CODE_TYPE', 'P')
REGISTRATION_OPEN = getattr(configuration, 'REGISTRATION_OPEN', True)
PYINV_ASSET_CODE_PREFIX = getattr(configuration, 'PYINV_ASSET_CODE_PREFIX', "INV")
SHORT_DATE_FORMAT = getattr(configuration, 'SHORT_DATE_FORMAT', 'Y-m-d')
SHORT_DATETIME_FORMAT = getattr(configuration, 'SHORT_DATETIME_FORMAT', 'Y-m-d H:i')
SHORT_TIME_FORMAT = getattr(configuration, 'SHORT_TIME_FORMAT', 'H:i:s')
SYSTEM_TITLE = getattr(configuration, 'SYSTEM_TITLE', 'PyInv')
TIME_FORMAT = getattr(configuration, 'TIME_FORMAT', 'g:i a')
TIME_ZONE = getattr(configuration, 'TIME_ZONE', 'UTC')
#
# Database
#
DATABASES = {
'default': DATABASE
}
#
# Email
#
EMAIL_BACKEND = EMAIL.get('BACKEND', 'django.core.mail.backends.smtp.EmailBackend')
EMAIL_HOST = EMAIL.get('SERVER')
EMAIL_HOST_USER = EMAIL.get('USERNAME')
EMAIL_HOST_PASSWORD = EMAIL.get('PASSWORD')
EMAIL_PORT = EMAIL.get('PORT', 25)
EMAIL_SSL_CERTFILE = EMAIL.get('SSL_CERTFILE')
EMAIL_SSL_KEYFILE = EMAIL.get('SSL_KEYFILE')
EMAIL_SUBJECT_PREFIX = EMAIL.get('SUBJECT_PREFIX', '[PyInv] ')
EMAIL_USE_SSL = EMAIL.get('USE_SSL', False)
EMAIL_USE_TLS = EMAIL.get('USE_TLS', False)
EMAIL_TIMEOUT = EMAIL.get('TIMEOUT', 10)
SERVER_EMAIL = EMAIL.get('FROM_EMAIL')
#
# Django
#
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# First Party
'accounts',
'inventory',
# Third Party
'crispy_forms',
'django_registration',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
"accounts.middleware.ProfileRequiredMiddleware",
]
ROOT_URLCONF = 'pyinv.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / "templates"],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'pyinv.context_processors.pyinv_settings',
],
},
},
]
WSGI_APPLICATION = 'pyinv.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_ROOT = str(BASE_DIR) + '/static'
STATIC_URL = f'/{BASE_PATH}static/'
# Authentication URLs
LOGIN_URL = '/{}login/'.format(BASE_PATH)
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
MESSAGE_TAGS = {
messages.DEBUG: "alert-info",
messages.INFO: "alert-info",
messages.SUCCESS: "alert-success",
messages.WARNING: "alert-warning",
messages.ERROR: "alert-danger",
}
CRISPY_TEMPLATE_PACK = "bootstrap4"
#
# Rest Framework
#
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissions'
],
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 20,
}
| [
"platform.node",
"pathlib.Path",
"platform.python_version_tuple",
"django.core.exceptions.ImproperlyConfigured",
"platform.python_version"
] | [((227, 242), 'platform.node', 'platform.node', ([], {}), '()\n', (240, 242), False, 'import platform\n'), ((363, 394), 'platform.python_version_tuple', 'platform.python_version_tuple', ([], {}), '()\n', (392, 394), False, 'import platform\n'), ((729, 863), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['"""Configuration file is not present. Please define pyinv/pyinv/configuration.py per the documentation."""'], {}), "(\n 'Configuration file is not present. Please define pyinv/pyinv/configuration.py per the documentation.'\n )\n", (749, 863), False, 'from django.core.exceptions import ImproperlyConfigured\n'), ((294, 308), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (298, 308), False, 'from pathlib import Path\n'), ((497, 522), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (520, 522), False, 'import platform\n')] |
#!/usr/bin/python
"""
Category: API Script
Author: nouse4it <<EMAIL>>
get_devices_ISE_API.py
Illustrates the following concepts:
 - Get all network devices from ISE and store them in CSV format
"""
__author__ = "nouse4it"
__author_email__ = "<EMAIL>"
__copyright__ = "Copyright (c) 2020 nouse4it"
# Importing all needed Modules
import json
import csv
import pprint
import requests
import urllib3
import sys
import getpass
requests.packages.urllib3.disable_warnings()
ise_ip = input('Enter IP Address of ISE: ')
api_user = input('Enter API Username: ')
api_pw = getpass.getpass(prompt='Enter API Password: ')
#------------------------------------------------------------------------------
def get_devices():
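    """Fetch all network devices from ISE via the ERS API (100 per page) and write name/IP rows to export_ise.csv."""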
url = 'https://{}:9060/ers/config/networkdevice/'.format(ise_ip)
headers = {'ACCEPT': 'application/json','content-type': 'application/json'}
req = requests.get(url, headers=headers, auth=(api_user, api_pw), verify=False)
    total = json.loads(req.text)
    items = total['SearchResult']['total']
    # ceiling division so the final partial page of devices is not dropped
    pages = -(-items // 100)
devices = []
    with open('export_ise.csv', 'w', newline='') as f:
writer = csv.writer(f)
for i in range(1,pages+1):
url_page = 'https://{}:9060/ers/config/networkdevice?size=100&page={}'.format(ise_ip,i)
req_page = requests.get(url_page, headers=headers, auth=(api_user, api_pw), verify=False)
myjson = req_page.text
parsed_json = (json.loads(myjson))
for device in parsed_json['SearchResult']['resources']:
devices.append(device['id'])
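        # fetch each device's detail record by id to extract its name and IP address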
for id in devices:
url_page = 'https://{}:9060/ers/config/networkdevice/{}'.format(ise_ip,id)
req_page = requests.get(url_page, headers=headers, auth=(api_user, api_pw), verify=False)
myjson = req_page.text
parsed_json = (json.loads(myjson))
name = parsed_json['NetworkDevice']['name']
ip = (parsed_json['NetworkDevice']['NetworkDeviceIPList'][0]['ipaddress'])
row = [name, ip]
writer.writerow(row)
#==============================================================================
# ---- Main: Get Devices
#==============================================================================
get_devices()
| [
"json.loads",
"requests.packages.urllib3.disable_warnings",
"csv.writer",
"requests.get",
"getpass.getpass"
] | [((430, 474), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', ([], {}), '()\n', (472, 474), False, 'import requests\n'), ((570, 616), 'getpass.getpass', 'getpass.getpass', ([], {'prompt': '"""Enter API Password: """'}), "(prompt='Enter API Password: ')\n", (585, 616), False, 'import getpass\n'), ((876, 949), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'auth': '(api_user, api_pw)', 'verify': '(False)'}), '(url, headers=headers, auth=(api_user, api_pw), verify=False)\n', (888, 949), False, 'import requests\n'), ((963, 983), 'json.loads', 'json.loads', (['req.text'], {}), '(req.text)\n', (973, 983), False, 'import json\n'), ((1153, 1166), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (1163, 1166), False, 'import csv\n'), ((1325, 1403), 'requests.get', 'requests.get', (['url_page'], {'headers': 'headers', 'auth': '(api_user, api_pw)', 'verify': '(False)'}), '(url_page, headers=headers, auth=(api_user, api_pw), verify=False)\n', (1337, 1403), False, 'import requests\n'), ((1466, 1484), 'json.loads', 'json.loads', (['myjson'], {}), '(myjson)\n', (1476, 1484), False, 'import json\n'), ((1736, 1814), 'requests.get', 'requests.get', (['url_page'], {'headers': 'headers', 'auth': '(api_user, api_pw)', 'verify': '(False)'}), '(url_page, headers=headers, auth=(api_user, api_pw), verify=False)\n', (1748, 1814), False, 'import requests\n'), ((1877, 1895), 'json.loads', 'json.loads', (['myjson'], {}), '(myjson)\n', (1887, 1895), False, 'import json\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-25 20:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ecommerce', '0014_unique_coupon_code'),
]
operations = [
migrations.AlterField(
model_name='coupon',
name='amount_type',
field=models.CharField(choices=[('percent-discount', 'percent-discount'), ('fixed-discount', 'fixed-discount'), ('fixed-price', 'fixed-price')], help_text='Whether amount is a percent or fixed discount', max_length=30),
),
]
| [
"django.db.models.CharField"
] | [((408, 629), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('percent-discount', 'percent-discount'), ('fixed-discount',\n 'fixed-discount'), ('fixed-price', 'fixed-price')]", 'help_text': '"""Whether amount is a percent or fixed discount"""', 'max_length': '(30)'}), "(choices=[('percent-discount', 'percent-discount'), (\n 'fixed-discount', 'fixed-discount'), ('fixed-price', 'fixed-price')],\n help_text='Whether amount is a percent or fixed discount', max_length=30)\n", (424, 629), False, 'from django.db import migrations, models\n')] |
import numpy as np
import pandas as pd
import networkx as nx
from sklearn.manifold import TSNE
from sklearn.metrics.pairwise import cosine_similarity
from bokeh.io import show
from bokeh.plotting import figure
from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker
from bokeh.models.graphs import from_networkx, NodesAndLinkedEdges, EdgesAndLinkedNodes
from bokeh.palettes import Spectral8
def visualize_sentences(vecs, sentences, palette="Viridis256"):
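    """Reduce sentence vectors to 2D with t-SNE and draw a labelled Bokeh scatter plot."""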
tsne = TSNE(n_components=2)
tsne_results = tsne.fit_transform(vecs)
df = pd.DataFrame(columns=['x', 'y', 'sentence'])
df['x'], df['y'], df['sentence'] = tsne_results[:, 0], tsne_results[:, 1], sentences
source = ColumnDataSource(ColumnDataSource.from_df(df))
labels = LabelSet(x="x", y="y", text="sentence", y_offset=8,
text_font_size="12pt", text_color="#555555",
source=source, text_align='center')
color_mapper = LinearColorMapper(palette=palette, low=min(tsne_results[:, 1]), high=max(tsne_results[:, 1]))
plot = figure(plot_width=900, plot_height=900)
plot.scatter("x", "y", size=12, source=source, color={'field': 'y', 'transform': color_mapper}, line_color=None, fill_alpha=0.8)
plot.add_layout(labels)
show(plot, notebook_handle=True)
"""
Visualize homonyms (2d vector space)
Inspired by:
https://github.com/hengluchang/visualizing_contextual_vectors/blob/master/elmo_vis.py
"""
def visualize_homonym(homonym, tokenized_sentences, vecs, model_name, palette="Viridis256"):
# process sentences
token_list, processed_sentences = [], []
for tokens in tokenized_sentences:
token_list.extend(tokens)
sentence = []
for token in tokens:
if model_name == "bert":
processed_token = token.replace("##", "")
else:
processed_token = token
if token == homonym:
processed_token = "\"" + processed_token + "\""
sentence.append(processed_token)
processed_sentences.append(' '.join(sentence))
# dimension reduction
tsne = TSNE(n_components=2)
tsne_results = tsne.fit_transform(vecs[1:])
# only plot the word representation of interest
interest_vecs, idx = np.zeros((len(tokenized_sentences), 2)), 0
for word, vec in zip(token_list, tsne_results):
if word == homonym:
interest_vecs[idx] = vec
idx += 1
df = pd.DataFrame(columns=['x', 'y', 'annotation'])
df['x'], df['y'], df['annotation'] = interest_vecs[:, 0], interest_vecs[:, 1], processed_sentences
source = ColumnDataSource(ColumnDataSource.from_df(df))
labels = LabelSet(x="x", y="y", text="annotation", y_offset=8,
text_font_size="12pt", text_color="#555555",
source=source, text_align='center')
color_mapper = LinearColorMapper(palette=palette, low=min(tsne_results[:, 1]), high=max(tsne_results[:, 1]))
plot = figure(plot_width=900, plot_height=900)
plot.scatter("x", "y", size=12, source=source, color={'field': 'y', 'transform': color_mapper},
line_color=None,
fill_alpha=0.8)
plot.add_layout(labels)
show(plot, notebook_handle=True)
def visualize_between_sentences(sentences, vec_list, palette="Viridis256"):
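    """Draw a heatmap of pairwise cosine similarities between sentences."""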
df_list, score_list = [], []
for sent1_idx, sentence1 in enumerate(sentences):
for sent2_idx, sentence2 in enumerate(sentences):
vec1, vec2 = vec_list[sent1_idx], vec_list[sent2_idx]
if np.any(vec1) and np.any(vec2):
score = cosine_similarity(X=[vec1], Y=[vec2])
df_list.append({'x': sentence1, 'y': sentence2, 'similarity': score[0][0]})
score_list.append(score[0][0])
df = pd.DataFrame(df_list)
color_mapper = LinearColorMapper(palette=palette, low=np.max(score_list), high=np.min(score_list))
TOOLS = "hover,save,pan,box_zoom,reset,wheel_zoom"
p = figure(x_range=sentences, y_range=list(reversed(sentences)),
x_axis_location="above", plot_width=900, plot_height=900,
toolbar_location='below', tools=TOOLS,
tooltips=[('sentences', '@x @y'), ('similarity', '@similarity')])
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = 3.14 / 3
p.rect(x="x", y="y", width=1, height=1,
source=df,
fill_color={'field': 'similarity', 'transform': color_mapper},
line_color=None)
color_bar = ColorBar(ticker=BasicTicker(desired_num_ticks=5),
color_mapper=color_mapper, major_label_text_font_size="7pt",
label_standoff=6, border_line_color=None, location=(0, 0))
p.add_layout(color_bar, 'right')
show(p)
def visualize_self_attention_scores(tokens, scores, palette="Viridis256"):
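    """Render tokens as a circular graph whose edge widths reflect self-attention scores above the mean."""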
mean_prob = np.mean(scores)
weighted_edges = []
for idx_1, token_prob_dist_1 in enumerate(scores):
for idx_2, el in enumerate(token_prob_dist_1):
if idx_1 == idx_2 or el < mean_prob:
weighted_edges.append((tokens[idx_1], tokens[idx_2], 0))
else:
weighted_edges.append((tokens[idx_1], tokens[idx_2], el))
min_prob = np.min([el[2] for el in weighted_edges])
max_prob = np.max([el[2] for el in weighted_edges])
weighted_edges = [(el[0], el[1], (el[2] - mean_prob) / (max_prob - mean_prob)) for el in weighted_edges]
G = nx.Graph()
G.add_nodes_from([el for el in tokens])
G.add_weighted_edges_from(weighted_edges)
plot = Plot(plot_width=500, plot_height=500,
x_range=Range1d(-1.1, 1.1), y_range=Range1d(-1.1, 1.1))
plot.add_tools(HoverTool(tooltips=None), TapTool(), BoxSelectTool())
graph_renderer = from_networkx(G, nx.circular_layout, scale=1, center=(0, 0))
graph_renderer.node_renderer.data_source.data['colors'] = Spectral8[:len(tokens)]
graph_renderer.node_renderer.glyph = Circle(size=15, line_color=None, fill_color="colors")
graph_renderer.node_renderer.selection_glyph = Circle(size=15, fill_color="colors")
graph_renderer.node_renderer.hover_glyph = Circle(size=15, fill_color="grey")
graph_renderer.edge_renderer.data_source.data["line_width"] = [G.get_edge_data(a, b)['weight'] * 3 for a, b in
G.edges()]
graph_renderer.edge_renderer.glyph = MultiLine(line_color="#CCCCCC", line_width={'field': 'line_width'})
graph_renderer.edge_renderer.selection_glyph = MultiLine(line_color="grey", line_width=5)
graph_renderer.edge_renderer.hover_glyph = MultiLine(line_color="grey", line_width=5)
graph_renderer.selection_policy = NodesAndLinkedEdges()
graph_renderer.inspection_policy = EdgesAndLinkedNodes()
plot.renderers.append(graph_renderer)
x, y = zip(*graph_renderer.layout_provider.graph_layout.values())
data = {'x': list(x), 'y': list(y), 'connectionNames': tokens}
source = ColumnDataSource(data)
labels = LabelSet(x='x', y='y', text='connectionNames', source=source, text_align='center')
plot.renderers.append(labels)
plot.add_tools(SaveTool())
show(plot)
def visualize_words(words, vecs, palette="Viridis256"):
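    """Reduce word vectors to 2D with t-SNE and draw a labelled Bokeh scatter plot."""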
tsne = TSNE(n_components=2)
tsne_results = tsne.fit_transform(vecs)
df = pd.DataFrame(columns=['x', 'y', 'word'])
df['x'], df['y'], df['word'] = tsne_results[:, 0], tsne_results[:, 1], list(words)
source = ColumnDataSource(ColumnDataSource.from_df(df))
labels = LabelSet(x="x", y="y", text="word", y_offset=8,
text_font_size="8pt", text_color="#555555",
source=source, text_align='center')
color_mapper = LinearColorMapper(palette=palette, low=min(tsne_results[:, 1]), high=max(tsne_results[:, 1]))
plot = figure(plot_width=900, plot_height=900)
plot.scatter("x", "y", size=12, source=source, color={'field': 'y', 'transform': color_mapper}, line_color=None,
fill_alpha=0.8)
plot.add_layout(labels)
show(plot, notebook_handle=True)
def visualize_between_words(words, vecs, palette="Viridis256"):
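    """Draw a heatmap of pairwise cosine similarities between words."""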
df_list = []
for word1_idx, word1 in enumerate(words):
for word2_idx, word2 in enumerate(words):
vec1 = vecs[word1_idx]
vec2 = vecs[word2_idx]
if np.any(vec1) and np.any(vec2):
score = cosine_similarity(X=[vec1], Y=[vec2])
df_list.append({'x': word1, 'y': word2, 'similarity': score[0][0]})
df = pd.DataFrame(df_list)
color_mapper = LinearColorMapper(palette=palette, low=1, high=0)
TOOLS = "hover,save,pan,box_zoom,reset,wheel_zoom"
p = figure(x_range=list(words), y_range=list(reversed(list(words))),
x_axis_location="above", plot_width=900, plot_height=900,
toolbar_location='below', tools=TOOLS,
tooltips=[('words', '@x @y'), ('similarity', '@similarity')])
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = 3.14 / 3
p.rect(x="x", y="y", width=1, height=1,
source=df,
fill_color={'field': 'similarity', 'transform': color_mapper},
line_color=None)
color_bar = ColorBar(ticker=BasicTicker(desired_num_ticks=5),
color_mapper=color_mapper, major_label_text_font_size="7pt",
label_standoff=6, border_line_color=None, location=(0, 0))
p.add_layout(color_bar, 'right')
show(p) | [
"bokeh.plotting.figure",
"bokeh.models.MultiLine",
"bokeh.models.ColumnDataSource.from_df",
"bokeh.models.Range1d",
"bokeh.models.graphs.from_networkx",
"bokeh.models.LabelSet",
"numpy.mean",
"bokeh.models.Circle",
"sklearn.metrics.pairwise.cosine_similarity",
"bokeh.models.TapTool",
"bokeh.models.LinearColorMapper",
"sklearn.manifold.TSNE",
"numpy.max",
"numpy.min",
"pandas.DataFrame",
"bokeh.models.BasicTicker",
"bokeh.models.graphs.NodesAndLinkedEdges",
"bokeh.io.show",
"bokeh.models.SaveTool",
"numpy.any",
"bokeh.models.ColumnDataSource",
"bokeh.models.HoverTool",
"bokeh.models.BoxSelectTool",
"bokeh.models.graphs.EdgesAndLinkedNodes",
"networkx.Graph"
] | [((586, 606), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (590, 606), False, 'from sklearn.manifold import TSNE\n'), ((660, 704), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['x', 'y', 'sentence']"}), "(columns=['x', 'y', 'sentence'])\n", (672, 704), True, 'import pandas as pd\n'), ((867, 1003), 'bokeh.models.LabelSet', 'LabelSet', ([], {'x': '"""x"""', 'y': '"""y"""', 'text': '"""sentence"""', 'y_offset': '(8)', 'text_font_size': '"""12pt"""', 'text_color': '"""#555555"""', 'source': 'source', 'text_align': '"""center"""'}), "(x='x', y='y', text='sentence', y_offset=8, text_font_size='12pt',\n text_color='#555555', source=source, text_align='center')\n", (875, 1003), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((1168, 1207), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': '(900)', 'plot_height': '(900)'}), '(plot_width=900, plot_height=900)\n', (1174, 1207), False, 'from bokeh.plotting import figure\n'), ((1373, 1405), 'bokeh.io.show', 'show', (['plot'], {'notebook_handle': '(True)'}), '(plot, notebook_handle=True)\n', (1377, 1405), False, 'from bokeh.io import show\n'), ((2229, 2249), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (2233, 2249), False, 'from sklearn.manifold import TSNE\n'), ((2565, 2611), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['x', 'y', 'annotation']"}), "(columns=['x', 'y', 'annotation'])\n", (2577, 2611), True, 'import pandas as pd\n'), ((2788, 2926), 'bokeh.models.LabelSet', 'LabelSet', ([], {'x': '"""x"""', 'y': '"""y"""', 'text': '"""annotation"""', 'y_offset': '(8)', 'text_font_size': '"""12pt"""', 'text_color': '"""#555555"""', 'source': 'source', 'text_align': '"""center"""'}), "(x='x', y='y', text='annotation', y_offset=8, text_font_size='12pt',\n text_color='#555555', source=source, text_align='center')\n", (2796, 2926), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((3091, 3130), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': '(900)', 'plot_height': '(900)'}), '(plot_width=900, plot_height=900)\n', (3097, 3130), False, 'from bokeh.plotting import figure\n'), ((3330, 3362), 'bokeh.io.show', 'show', (['plot'], {'notebook_handle': '(True)'}), '(plot, notebook_handle=True)\n', (3334, 3362), False, 'from bokeh.io import show\n'), ((3908, 3929), 'pandas.DataFrame', 'pd.DataFrame', (['df_list'], {}), '(df_list)\n', (3920, 3929), True, 'import pandas as pd\n'), ((5005, 5012), 'bokeh.io.show', 'show', (['p'], {}), '(p)\n', (5009, 5012), False, 'from bokeh.io import show\n'), ((5106, 5121), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (5113, 5121), True, 'import numpy as np\n'), ((5485, 5525), 'numpy.min', 'np.min', (['[el[2] for el in weighted_edges]'], {}), '([el[2] for el in weighted_edges])\n', (5491, 5525), True, 'import numpy as np\n'), ((5541, 5581), 'numpy.max', 'np.max', (['[el[2] for el in weighted_edges]'], {}), '([el[2] for el in weighted_edges])\n', (5547, 5581), True, 'import numpy as np\n'), ((5700, 5710), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (5708, 5710), True, 'import networkx as nx\n'), ((6018, 6078), 'bokeh.models.graphs.from_networkx', 'from_networkx', (['G', 'nx.circular_layout'], {'scale': '(1)', 'center': '(0, 0)'}), '(G, 
nx.circular_layout, scale=1, center=(0, 0))\n', (6031, 6078), False, 'from bokeh.models.graphs import from_networkx, NodesAndLinkedEdges, EdgesAndLinkedNodes\n'), ((6207, 6260), 'bokeh.models.Circle', 'Circle', ([], {'size': '(15)', 'line_color': 'None', 'fill_color': '"""colors"""'}), "(size=15, line_color=None, fill_color='colors')\n", (6213, 6260), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((6312, 6348), 'bokeh.models.Circle', 'Circle', ([], {'size': '(15)', 'fill_color': '"""colors"""'}), "(size=15, fill_color='colors')\n", (6318, 6348), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((6396, 6430), 'bokeh.models.Circle', 'Circle', ([], {'size': '(15)', 'fill_color': '"""grey"""'}), "(size=15, fill_color='grey')\n", (6402, 6430), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((6666, 6733), 'bokeh.models.MultiLine', 'MultiLine', ([], {'line_color': '"""#CCCCCC"""', 'line_width': "{'field': 'line_width'}"}), "(line_color='#CCCCCC', line_width={'field': 'line_width'})\n", (6675, 6733), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((6785, 6827), 'bokeh.models.MultiLine', 'MultiLine', ([], {'line_color': '"""grey"""', 'line_width': '(5)'}), "(line_color='grey', line_width=5)\n", (6794, 6827), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((6875, 6917), 'bokeh.models.MultiLine', 'MultiLine', ([], {'line_color': '"""grey"""', 'line_width': '(5)'}), "(line_color='grey', line_width=5)\n", (6884, 6917), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((6957, 6978), 'bokeh.models.graphs.NodesAndLinkedEdges', 'NodesAndLinkedEdges', ([], {}), '()\n', (6976, 6978), False, 'from bokeh.models.graphs import from_networkx, NodesAndLinkedEdges, EdgesAndLinkedNodes\n'), ((7018, 7039), 'bokeh.models.graphs.EdgesAndLinkedNodes', 'EdgesAndLinkedNodes', ([], {}), '()\n', (7037, 7039), False, 'from bokeh.models.graphs import from_networkx, NodesAndLinkedEdges, EdgesAndLinkedNodes\n'), ((7234, 7256), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['data'], {}), '(data)\n', (7250, 7256), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((7270, 7357), 'bokeh.models.LabelSet', 'LabelSet', ([], {'x': '"""x"""', 'y': '"""y"""', 'text': '"""connectionNames"""', 'source': 'source', 'text_align': '"""center"""'}), "(x='x', y='y', text='connectionNames', source=source, text_align=\n 'center')\n", (7278, 7357), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((7422, 7432), 'bokeh.io.show', 'show', (['plot'], {}), 
'(plot)\n', (7426, 7432), False, 'from bokeh.io import show\n'), ((7502, 7522), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (7506, 7522), False, 'from sklearn.manifold import TSNE\n'), ((7576, 7616), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['x', 'y', 'word']"}), "(columns=['x', 'y', 'word'])\n", (7588, 7616), True, 'import pandas as pd\n'), ((7777, 7908), 'bokeh.models.LabelSet', 'LabelSet', ([], {'x': '"""x"""', 'y': '"""y"""', 'text': '"""word"""', 'y_offset': '(8)', 'text_font_size': '"""8pt"""', 'text_color': '"""#555555"""', 'source': 'source', 'text_align': '"""center"""'}), "(x='x', y='y', text='word', y_offset=8, text_font_size='8pt',\n text_color='#555555', source=source, text_align='center')\n", (7785, 7908), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((8073, 8112), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': '(900)', 'plot_height': '(900)'}), '(plot_width=900, plot_height=900)\n', (8079, 8112), False, 'from bokeh.plotting import figure\n'), ((8295, 8327), 'bokeh.io.show', 'show', (['plot'], {'notebook_handle': '(True)'}), '(plot, notebook_handle=True)\n', (8299, 8327), False, 'from bokeh.io import show\n'), ((8778, 8799), 'pandas.DataFrame', 'pd.DataFrame', (['df_list'], {}), '(df_list)\n', (8790, 8799), True, 'import pandas as pd\n'), ((8819, 8868), 'bokeh.models.LinearColorMapper', 'LinearColorMapper', ([], {'palette': 'palette', 'low': '(1)', 'high': '(0)'}), '(palette=palette, low=1, high=0)\n', (8836, 8868), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((9837, 9844), 'bokeh.io.show', 'show', (['p'], {}), '(p)\n', (9841, 9844), False, 'from bokeh.io import show\n'), ((824, 852), 'bokeh.models.ColumnDataSource.from_df', 'ColumnDataSource.from_df', (['df'], {}), '(df)\n', (848, 852), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((2745, 2773), 'bokeh.models.ColumnDataSource.from_df', 'ColumnDataSource.from_df', (['df'], {}), '(df)\n', (2769, 2773), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((5942, 5966), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': 'None'}), '(tooltips=None)\n', (5951, 5966), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((5968, 5977), 'bokeh.models.TapTool', 'TapTool', ([], {}), '()\n', (5975, 5977), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((5979, 5994), 'bokeh.models.BoxSelectTool', 'BoxSelectTool', ([], {}), '()\n', (5992, 5994), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((7406, 7416), 'bokeh.models.SaveTool', 'SaveTool', ([], {}), '()\n', (7414, 7416), False, 'from bokeh.models import Plot, Range1d, 
MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((7734, 7762), 'bokeh.models.ColumnDataSource.from_df', 'ColumnDataSource.from_df', (['df'], {}), '(df)\n', (7758, 7762), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((3988, 4006), 'numpy.max', 'np.max', (['score_list'], {}), '(score_list)\n', (3994, 4006), True, 'import numpy as np\n'), ((4013, 4031), 'numpy.min', 'np.min', (['score_list'], {}), '(score_list)\n', (4019, 4031), True, 'import numpy as np\n'), ((4762, 4794), 'bokeh.models.BasicTicker', 'BasicTicker', ([], {'desired_num_ticks': '(5)'}), '(desired_num_ticks=5)\n', (4773, 4794), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((5875, 5893), 'bokeh.models.Range1d', 'Range1d', (['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (5882, 5893), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((5903, 5921), 'bokeh.models.Range1d', 'Range1d', (['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (5910, 5921), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((9592, 9624), 'bokeh.models.BasicTicker', 'BasicTicker', ([], {'desired_num_ticks': '(5)'}), '(desired_num_ticks=5)\n', (9603, 9624), False, 'from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker\n'), ((3667, 3679), 'numpy.any', 'np.any', (['vec1'], {}), '(vec1)\n', (3673, 3679), True, 'import numpy as np\n'), ((3684, 3696), 'numpy.any', 'np.any', (['vec2'], {}), '(vec2)\n', (3690, 3696), True, 'import numpy as np\n'), ((3722, 3759), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', ([], {'X': '[vec1]', 'Y': '[vec2]'}), '(X=[vec1], Y=[vec2])\n', (3739, 3759), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((8592, 8604), 'numpy.any', 'np.any', (['vec1'], {}), '(vec1)\n', (8598, 8604), True, 'import numpy as np\n'), ((8609, 8621), 'numpy.any', 'np.any', (['vec2'], {}), '(vec2)\n', (8615, 8621), True, 'import numpy as np\n'), ((8647, 8684), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', ([], {'X': '[vec1]', 'Y': '[vec2]'}), '(X=[vec1], Y=[vec2])\n', (8664, 8684), False, 'from sklearn.metrics.pairwise import cosine_similarity\n')] |
# Solfec-2.0 unit test: OUTPUT input command
import unittest, os
output0 = \
'''OUTPUT_0_entities = ['AREA','BPAIR','CF','CFN','CFT','COLOR','CPAIR','DISPL','LINVEL','NUMBER','STRESS']
OUTPUT_0_modes = ['CD','EL','MESH','SURF']
OUTPUT_1_entities = ['COLOR','DISPL']
OUTPUT_1_subset = [0]
OUTPUT_1_modes = ['CD','EL','MESH','SURF']
OUTPUT_2_entities = ['LINVEL','STRESS']
OUTPUT_2_subset = [1]
OUTPUT_2_modes = ['MESH']
OUTPUT_3_entities = ['AREA','CF','CPAIR']
OUTPUT_3_subset = [0,1]
OUTPUT_3_modes = ['CD']
'''
class test(unittest.TestCase):
def test(self):
print('\ntesting OUTPUT command')
solfec = os.popen('../solfec4 output.py')
output = solfec.read()
solfec.close()
self.assertEqual(output, output0)
| [
"os.popen"
] | [((615, 647), 'os.popen', 'os.popen', (['"""../solfec4 output.py"""'], {}), "('../solfec4 output.py')\n", (623, 647), False, 'import unittest, os\n')] |
"""Fixtures and setup for tests."""
from unittest.mock import patch
import logging
import pytest
from pytest_socket import disable_socket
from midea_beautiful.util import clear_sensitive, very_verbose
# pylint: disable=missing-function-docstring
def pytest_runtest_setup():
disable_socket()
@pytest.fixture(autouse=True)
def log_warning(caplog):
"""Automatically set log level to WARNING"""
caplog.set_level(logging.WARNING)
@pytest.fixture(name="mock_cloud")
def mock_cloud():
"""Fixture that mocks Midea cloud API client"""
with patch("midea_beautiful.cloud.MideaCloud") as cloud:
return cloud
@pytest.fixture(autouse=True)
def clean_logging_setup_state():
# Code that will run before your test, for example:
clear_sensitive()
very_verbose(False)
# A test function will be run at this point
yield
# Code that will run after your test, for example:
clear_sensitive()
| [
"midea_beautiful.util.very_verbose",
"pytest_socket.disable_socket",
"pytest.fixture",
"midea_beautiful.util.clear_sensitive",
"unittest.mock.patch"
] | [((303, 331), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (317, 331), False, 'import pytest\n'), ((447, 480), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""mock_cloud"""'}), "(name='mock_cloud')\n", (461, 480), False, 'import pytest\n'), ((636, 664), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (650, 664), False, 'import pytest\n'), ((283, 299), 'pytest_socket.disable_socket', 'disable_socket', ([], {}), '()\n', (297, 299), False, 'from pytest_socket import disable_socket\n'), ((758, 775), 'midea_beautiful.util.clear_sensitive', 'clear_sensitive', ([], {}), '()\n', (773, 775), False, 'from midea_beautiful.util import clear_sensitive, very_verbose\n'), ((780, 799), 'midea_beautiful.util.very_verbose', 'very_verbose', (['(False)'], {}), '(False)\n', (792, 799), False, 'from midea_beautiful.util import clear_sensitive, very_verbose\n'), ((917, 934), 'midea_beautiful.util.clear_sensitive', 'clear_sensitive', ([], {}), '()\n', (932, 934), False, 'from midea_beautiful.util import clear_sensitive, very_verbose\n'), ((560, 601), 'unittest.mock.patch', 'patch', (['"""midea_beautiful.cloud.MideaCloud"""'], {}), "('midea_beautiful.cloud.MideaCloud')\n", (565, 601), False, 'from unittest.mock import patch\n')] |
from experiment_tracker import Experiment
# 1. Specify the database connection details.
database_config = {'host': 'localhost',
'port': 3306,
'user': 'root',
'passwd': '<PASSWORD>',
'db': 'experiment_tracker'}
# 2. Create an experiment. The project name is used to determine the table name of the corresponding SQL table. All experiments of the same project will be stored in the same table.
experiment = Experiment(database_config=database_config, project_name='hello_world_project')
# 3. Configure parameters of the project, such as dataset name or learning rate. Each parameter will be stored in an SQL column of the project's SQL table. The column is created automatically, and its type is determined from the type of the parameter's value.
experiment.parameters['dataset_train'] = 'train_set'
experiment.parameters['dataset_test'] = 'test_set'
experiment.parameters['learning_rate'] = 0.001
experiment.parameters['num_runs'] = 3
experiment.parameters['use_dropout'] = False
# 4. Run experiments and collect results.
# dataset_train = load_dataset(experiment.parameters['dataset_train'])
# dataset_test = load_dataset(experiment.parameters['dataset_test'])
for seed in range(0, experiment.parameters['num_runs']):
experiment.parameters['seed'] = seed # add/modify parameters in case they change during an experiment run
# model = train_model(dataset_train, experiment) # 'experiment' contains all parameters for training, no need to change functions calls after adding additional parameters
experiment.results['runtime'] = 1.5
experiment.results['train_loss'] = 0.1
# evaluation = evaluate_model(dataset_test) # results can be collected at different locations in the code
experiment.results['test_loss'] = 0.3
# 5. Save parameters and corresponding results to the database.
experiment.save_results()
# The stored results can be analyzed later, e.g. by directly querying the database.
# For example, 'SELECT timestamp, AVG(test_loss) FROM hello_world_project GROUP BY timestamp' computes the average test loss of the three different seeds for each performed experiment run. | [
"experiment_tracker.Experiment"
] | [((487, 566), 'experiment_tracker.Experiment', 'Experiment', ([], {'database_config': 'database_config', 'project_name': '"""hello_world_project"""'}), "(database_config=database_config, project_name='hello_world_project')\n", (497, 566), False, 'from experiment_tracker import Experiment\n')] |
from __future__ import print_function, unicode_literals
from __future__ import absolute_import, division
import random
from concept_formation.datasets import load_iris
from concept_formation.datasets import load_mushroom
from concept_formation.datasets import load_molecule
from concept_formation.datasets import load_rb_s_07
from concept_formation.datasets import load_rb_s_13
from concept_formation.datasets import load_rb_wb_03
from concept_formation.datasets import load_rb_com_11
from concept_formation.datasets import load_quadruped
from concept_formation.datasets import load_forest_fires
from concept_formation.datasets import load_congressional_voting
from concept_formation.datasets import load_rb_s_07_human_predictions
def test_load_forest_fires():
data = load_forest_fires(num_instances=1)
known = {'DC': 94.3, 'DMC': 26.2, 'FFMC': 86.2, 'ISI': 5.1, 'RH': 51.0,
'area': 0.0, 'day': 'fri', 'month': 'mar', 'rain': 0.0, 'temp':
8.2, 'wind': 6.7, 'x-axis': 7.0, 'y-axis': 5.0}
assert known == data[0]
def test_load_congressional_voting():
data = load_congressional_voting(num_instances=1)
known = {'Class Name': 'republican', 'adoption-of-the-budget-resolution':
'n', 'aid-to-nicaraguan-contras': 'n', 'anti-satellite-test-ban':
'n', 'crime': 'y', 'duty-free-exports': 'n', 'education-spending':
'y', 'el-salvador-aid': 'y',
'export-administration-act-south-africa': 'y',
'handicapped-infants': 'n', 'immigration': 'y', 'mx-missile': 'n',
'physician-fee-freeze': 'y', 'religious-groups-in-schools': 'y',
'superfund-right-to-sue': 'y', 'water-project-cost-sharing': 'y'}
assert known == data[0]
def test_load_iris():
data = load_iris(num_instances=1)
known = {'class': 'Iris-setosa', 'petal length': 1.4, 'petal width': 0.2,
'sepal length': 5.1, 'sepal width': 3.5}
assert known == data[0]
def test_load_mushroom():
data = load_mushroom(num_instances=1)
known = {'bruises?': 'yes', 'cap-color': 'brown', 'cap-shape': 'convex',
'cap-surface': 'smooth', 'classification': 'poisonous',
'gill-attachment': 'free', 'gill-color': 'black', 'gill-size':
'narrow', 'gill-spacing': 'closed', 'habitat': 'urban', 'odor':
'pungent', 'population': 'scattered', 'ring-number': 'one',
'ring-type': 'pendant', 'spore-print-color': 'black',
'stalk-color-above-ring': 'white', 'stalk-color-below-ring':
'white', 'stalk-root': 'equal', 'stalk-shape': 'enlarging',
'stalk-surface-above-ring': 'smooth', 'stalk-surface-below-ring':
'smooth', 'veil-color': 'white', 'veil-type': 'partial'}
assert known == data[0]
def test_load_rb_com_11():
data = load_rb_com_11(num_instances=1)
known = {'_guid': 'ea022d3d-5c9e-46d7-be23-8ea718fe7816',
'_human_cluster_label': '0', 'component0': {'b': 1.0, 'l': 0.0,
'r': 1.0, 't': 2.0,
'type': 'cube0'},
'component1': {'b': 3.0, 'l': 2.0, 'r': 3.0, 't': 4.0, 'type':
'cube0'}, 'component14': {'b': 4.0, 'l': 1.0, 'r':
4.0, 't': 5.0, 'type':
'ufoo0'}, 'component2':
{'b': 1.0, 'l': 1.0, 'r': 4.0, 't': 2.0, 'type': 'plat0'},
'component3': {'b': 2.0, 'l': 1.0, 'r': 4.0, 't': 3.0, 'type':
'plat0'}, 'component4': {'b': 0.0, 'l': 0.0, 'r':
5.0, 't': 1.0, 'type':
'rect0'}}
assert known == data[0]
def test_load_rb_s_07():
data = load_rb_s_07(num_instances=1)
known = {'_guid': '660ac76d-93b3-4ce7-8a15-a3213e9103f5', 'component0':
{'b': 0.0, 'l': 0.0, 'r': 3.0, 't': 1.0, 'type': 'plat0'},
'component1': {'b': 1.0, 'l': 1.0, 'r': 2.0, 't': 4.0, 'type':
'plat90'}, 'component8': {'b': 4.0, 'l': 0.0, 'r':
3.0, 't': 5.0, 'type':
'ufoo0'}, 'success': '0'}
assert known == data[0]
def test_load_rb_s_13():
data = load_rb_s_13(num_instances=1)
known = {'_guid': '684b4ce5-0f55-481c-ae9a-1474de8418ea',
'_human_cluster_label': '0', 'component0': {'b': 3.0, 'l': 2.0,
'r': 3.0, 't': 4.0,
'type': 'cube0'},
'component1': {'b': 4.0, 'l': 2.0, 'r': 3.0, 't': 5.0, 'type':
'cube0'}, 'component14': {'b': 0.0, 'l': 0.0, 'r':
4.0, 't': 1.0, 'type':
'trap0'}, 'component15':
{'b': 5.0, 'l': 1.0, 'r': 3.0, 't': 6.0, 'type': 'ufoo0'},
'component2': {'b': 1.0, 'l': 0.0, 'r': 3.0, 't': 2.0, 'type':
'plat0'}, 'component3': {'b': 2.0, 'l': 0.0, 'r':
3.0, 't': 3.0, 'type':
'plat0'}}
assert data[0] == known
def test_load_rb_wb_03():
data = load_rb_wb_03(num_instances=1)
known = {'_guid': 'aa5eff72-0572-4eff-a007-3def9a82ba5b',
'_human_cluster_label': '0', 'component0': {'b': 2.0, 'l': 2.0,
'r': 3.0, 't': 3.0,
'type': 'cube0'},
'component1': {'b': 2.0, 'l': 3.0, 'r': 4.0, 't': 3.0, 'type':
'cube0'}, 'component11': {'b': 3.0, 'l': 1.0, 'r':
4.0, 't': 4.0, 'type':
'ufoo0'}, 'component2':
{'b': 1.0, 'l': 2.0, 'r': 5.0, 't': 2.0, 'type': 'plat0'},
'component3': {'b': 0.0, 'l': 0.0, 'r': 5.0, 't': 1.0, 'type':
'rect0'}}
assert known == data[0]
def test_rb_s_07_human_predictions():
data = load_rb_s_07_human_predictions()
known = ['user_id,instance_guid,time,order,prediction,correctness',
'1,2fda0bde-95a7-4bda-9851-785275c3f56d,2015-02-15 '
'19:21:14.327344+00:00,1,0,1']
assert known == data[0:2]
def test_load_quadruped():
random.seed(0)
data = load_quadruped(10)
assert len(data) == 10
assert 'head' in data[0]
assert 'leg1' in data[0]
assert 'tail' in data[0]
def test_load_molecule():
data = load_molecule()
known = {'(bond Single Not_stereo ?atom0001 ?atom0003)': True,
'(bond Single Not_stereo ?atom0001 ?atom0014)': True,
'(bond Single Not_stereo ?atom0002 ?atom0004)': True,
'(bond Single Not_stereo ?atom0002 ?atom0012)': True,
'(bond Single Not_stereo ?atom0002 ?atom0013)': True,
'(bond Single Not_stereo ?atom0003 ?atom0004)': True,
'(bond Single Not_stereo ?atom0003 ?atom0005)': True,
'(bond Single Not_stereo ?atom0003 ?atom0006)': True,
'(bond Single Not_stereo ?atom0004 ?atom0007)': True,
'(bond Single Not_stereo ?atom0004 ?atom0008)': True,
'(bond Single Not_stereo ?atom0005 ?atom0009)': True,
'(bond Single Not_stereo ?atom0005 ?atom0010)': True,
'(bond Single Not_stereo ?atom0005 ?atom0011)': True,
'?atom0001': {'charge': 'outside_limits',
'hydrogen_count': 'H0',
'mass_diff': '0',
'stereo_parity': 'not_stereo',
'symbol': 'O',
'valence': 'no marking',
'x': 2.5369,
'y': 0.75,
'z': 0.0},
'?atom0002': {'charge': 'outside_limits',
'hydrogen_count': 'H0',
'mass_diff': '0',
'stereo_parity': 'not_stereo',
'symbol': 'N',
'valence': 'no marking',
'x': 5.135,
'y': 0.25,
'z': 0.0},
'?atom0003': {'charge': 'outside_limits',
'hydrogen_count': 'H0',
'mass_diff': '0',
'stereo_parity': 'unmarked',
'symbol': 'C',
'valence': 'no marking',
'x': 3.403,
'y': 0.25,
'z': 0.0},
'?atom0004': {'charge': 'outside_limits',
'hydrogen_count': 'H0',
'mass_diff': '0',
'stereo_parity': 'not_stereo',
'symbol': 'C',
'valence': 'no marking',
'x': 4.269,
'y': 0.75,
'z': 0.0},
'?atom0005': {'charge': 'outside_limits',
'hydrogen_count': 'H0',
'mass_diff': '0',
'stereo_parity': 'not_stereo',
'symbol': 'C',
'valence': 'no marking',
'x': 3.403,
'y': -0.75,
'z': 0.0},
'?atom0006': {'charge': 'outside_limits',
'hydrogen_count': 'H0',
'mass_diff': '0',
'stereo_parity': 'not_stereo',
'symbol': 'H',
'valence': 'no marking',
'x': 3.403,
'y': 1.1,
'z': 0.0},
'?atom0007': {'charge': 'outside_limits',
'hydrogen_count': 'H0',
'mass_diff': '0',
'stereo_parity': 'not_stereo',
'symbol': 'H',
'valence': 'no marking',
'x': 4.6675,
'y': 1.225,
'z': 0.0},
'?atom0008': {'charge': 'outside_limits',
'hydrogen_count': 'H0',
'mass_diff': '0',
'stereo_parity': 'not_stereo',
'symbol': 'H',
'valence': 'no marking',
'x': 3.8705,
'y': 1.225,
'z': 0.0},
'?atom0009': {'charge': 'outside_limits',
'hydrogen_count': 'H0',
'mass_diff': '0',
'stereo_parity': 'not_stereo',
'symbol': 'H',
'valence': 'no marking',
'x': 2.783,
'y': -0.75,
'z': 0.0},
'?atom0010': {'charge': 'outside_limits',
'hydrogen_count': 'H0',
'mass_diff': '0',
'stereo_parity': 'not_stereo',
'symbol': 'H',
'valence': 'no marking',
'x': 3.403,
'y': -1.37,
'z': 0.0},
'?atom0011': {'charge': 'outside_limits',
'hydrogen_count': 'H0',
'mass_diff': '0',
'stereo_parity': 'not_stereo',
'symbol': 'H',
'valence': 'no marking',
'x': 4.023,
'y': -0.75,
'z': 0.0},
'?atom0012': {'charge': 'outside_limits',
'hydrogen_count': 'H0',
'mass_diff': '0',
'stereo_parity': 'not_stereo',
'symbol': 'H',
'valence': 'no marking',
'x': 5.672,
'y': 0.56,
'z': 0.0},
'?atom0013': {'charge': 'outside_limits',
'hydrogen_count': 'H0',
'mass_diff': '0',
'stereo_parity': 'not_stereo',
'symbol': 'H',
'valence': 'no marking',
'x': 5.135,
'y': -0.37,
'z': 0.0},
'?atom0014': {'charge': 'outside_limits',
'hydrogen_count': 'H0',
'mass_diff': '0',
'stereo_parity': 'not_stereo',
'symbol': 'H',
'valence': 'no marking',
'x': 2.0,
'y': 0.44,
'z': 0.0},
'_name': '4',
'_software': '-OEChem-03201502492D',
'_version': 'V2000',
'chiral': True}
assert known == data[3]
| [
"concept_formation.datasets.load_rb_com_11",
"concept_formation.datasets.load_mushroom",
"concept_formation.datasets.load_quadruped",
"concept_formation.datasets.load_rb_s_07",
"concept_formation.datasets.load_forest_fires",
"concept_formation.datasets.load_rb_s_07_human_predictions",
"concept_formation.datasets.load_rb_wb_03",
"random.seed",
"concept_formation.datasets.load_iris",
"concept_formation.datasets.load_congressional_voting",
"concept_formation.datasets.load_molecule",
"concept_formation.datasets.load_rb_s_13"
] | [((775, 809), 'concept_formation.datasets.load_forest_fires', 'load_forest_fires', ([], {'num_instances': '(1)'}), '(num_instances=1)\n', (792, 809), False, 'from concept_formation.datasets import load_forest_fires\n'), ((1103, 1145), 'concept_formation.datasets.load_congressional_voting', 'load_congressional_voting', ([], {'num_instances': '(1)'}), '(num_instances=1)\n', (1128, 1145), False, 'from concept_formation.datasets import load_congressional_voting\n'), ((1785, 1811), 'concept_formation.datasets.load_iris', 'load_iris', ([], {'num_instances': '(1)'}), '(num_instances=1)\n', (1794, 1811), False, 'from concept_formation.datasets import load_iris\n'), ((2011, 2041), 'concept_formation.datasets.load_mushroom', 'load_mushroom', ([], {'num_instances': '(1)'}), '(num_instances=1)\n', (2024, 2041), False, 'from concept_formation.datasets import load_mushroom\n'), ((2845, 2876), 'concept_formation.datasets.load_rb_com_11', 'load_rb_com_11', ([], {'num_instances': '(1)'}), '(num_instances=1)\n', (2859, 2876), False, 'from concept_formation.datasets import load_rb_com_11\n'), ((3909, 3938), 'concept_formation.datasets.load_rb_s_07', 'load_rb_s_07', ([], {'num_instances': '(1)'}), '(num_instances=1)\n', (3921, 3938), False, 'from concept_formation.datasets import load_rb_s_07\n'), ((4465, 4494), 'concept_formation.datasets.load_rb_s_13', 'load_rb_s_13', ([], {'num_instances': '(1)'}), '(num_instances=1)\n', (4477, 4494), False, 'from concept_formation.datasets import load_rb_s_13\n'), ((5529, 5559), 'concept_formation.datasets.load_rb_wb_03', 'load_rb_wb_03', ([], {'num_instances': '(1)'}), '(num_instances=1)\n', (5542, 5559), False, 'from concept_formation.datasets import load_rb_wb_03\n'), ((6426, 6458), 'concept_formation.datasets.load_rb_s_07_human_predictions', 'load_rb_s_07_human_predictions', ([], {}), '()\n', (6456, 6458), False, 'from concept_formation.datasets import load_rb_s_07_human_predictions\n'), ((6704, 6718), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (6715, 6718), False, 'import random\n'), ((6730, 6748), 'concept_formation.datasets.load_quadruped', 'load_quadruped', (['(10)'], {}), '(10)\n', (6744, 6748), False, 'from concept_formation.datasets import load_quadruped\n'), ((6903, 6918), 'concept_formation.datasets.load_molecule', 'load_molecule', ([], {}), '()\n', (6916, 6918), False, 'from concept_formation.datasets import load_molecule\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 29 15:55:49 2018
@author: <NAME>
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.metrics import mean_squared_error
#%%
train = pd.read_csv("D:\\Kaggle\\Santander\\Data\\train.csv")
columns = train.columns.tolist()
columns.remove("ID")
columns.remove("target")
columns = np.asarray(columns)
x = train.loc[:, columns].values
#%%
std_values = StandardScaler().fit_transform(x)
pca = PCA(n_components=4000)
principleComponents = pca.fit_transform(std_values)
#%%
variance = pca.explained_variance_ratio_
variance_ratio = np.cumsum(np.round(variance, decimals=10)*100)
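# cumulative explained variance in percent, plotted below to choose how many components to keep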
#%%
plt.plot(variance_ratio)
#%%
TakenPCA = pd.DataFrame(principleComponents[:,:1210])
#TakenPCA["ID"] = train["ID"]
#TakenPCA["target"] = train["target"]
#%%
train_x = TakenPCA
train_y = train["target"]
#%%
LR = LinearRegression()
LR.fit(train_x, train_y)
#%%
submission = pd.read_csv("D:\\Kaggle\\Santander\\Data\\test.csv")
submission = submission.fillna("0")
columns = submission.columns.tolist()
columns.remove("ID")
columns = np.asarray(columns)
x = submission.loc[:, columns].values
#%%
std_values = StandardScaler().fit_transform(x)
pca = PCA(n_components=1210)
principleComponents = pca.fit_transform(std_values)
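# NOTE: the scaler and PCA are refit on the test set here; a leakage-free pipeline would reuse the transformers fitted on the training data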
#%%
TakenPCA = pd.DataFrame(principleComponents[:,:1210])
prediction = LR.predict(TakenPCA)
submission_file = pd.DataFrame(np.abs(prediction), columns = ["Target"])
submission_file["ID"] = submission["ID"]
submission_file = submission_file[["ID", "Target"]]
submission_file.to_csv("D:\\Kaggle\\Santander\\Data\\submission.csv", index=False)
| [
"numpy.abs",
"pandas.read_csv",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.plot",
"numpy.asarray",
"sklearn.preprocessing.StandardScaler",
"pandas.DataFrame",
"sklearn.linear_model.LinearRegression",
"numpy.round"
] | [((432, 485), 'pandas.read_csv', 'pd.read_csv', (['"""D:\\\\Kaggle\\\\Santander\\\\Data\\\\train.csv"""'], {}), "('D:\\\\Kaggle\\\\Santander\\\\Data\\\\train.csv')\n", (443, 485), True, 'import pandas as pd\n'), ((577, 596), 'numpy.asarray', 'np.asarray', (['columns'], {}), '(columns)\n', (587, 596), True, 'import numpy as np\n'), ((689, 711), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(4000)'}), '(n_components=4000)\n', (692, 711), False, 'from sklearn.decomposition import PCA\n'), ((880, 904), 'matplotlib.pyplot.plot', 'plt.plot', (['variance_ratio'], {}), '(variance_ratio)\n', (888, 904), True, 'import matplotlib.pyplot as plt\n'), ((921, 964), 'pandas.DataFrame', 'pd.DataFrame', (['principleComponents[:, :1210]'], {}), '(principleComponents[:, :1210])\n', (933, 964), True, 'import pandas as pd\n'), ((1091, 1109), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1107, 1109), False, 'from sklearn.linear_model import LinearRegression\n'), ((1153, 1205), 'pandas.read_csv', 'pd.read_csv', (['"""D:\\\\Kaggle\\\\Santander\\\\Data\\\\test.csv"""'], {}), "('D:\\\\Kaggle\\\\Santander\\\\Data\\\\test.csv')\n", (1164, 1205), True, 'import pandas as pd\n'), ((1312, 1331), 'numpy.asarray', 'np.asarray', (['columns'], {}), '(columns)\n', (1322, 1331), True, 'import numpy as np\n'), ((1429, 1451), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(1210)'}), '(n_components=1210)\n', (1432, 1451), False, 'from sklearn.decomposition import PCA\n'), ((1521, 1564), 'pandas.DataFrame', 'pd.DataFrame', (['principleComponents[:, :1210]'], {}), '(principleComponents[:, :1210])\n', (1533, 1564), True, 'import pandas as pd\n'), ((1630, 1648), 'numpy.abs', 'np.abs', (['prediction'], {}), '(prediction)\n', (1636, 1648), True, 'import numpy as np\n'), ((649, 665), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (663, 665), False, 'from sklearn.preprocessing import StandardScaler\n'), ((838, 869), 'numpy.round', 'np.round', (['variance'], {'decimals': '(10)'}), '(variance, decimals=10)\n', (846, 869), True, 'import numpy as np\n'), ((1389, 1405), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1403, 1405), False, 'from sklearn.preprocessing import StandardScaler\n')] |
from flask import Blueprint, request, jsonify, abort, url_for
from . import db
from .model import Link
from .schema import url_schema
from .routes import get_unique_id
api = Blueprint("api", __name__)
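# the blueprint only groups these routes; it must still be registered with app.register_blueprint(api)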
@api.get("/")
def api_index():
return jsonify(msg="connected")
@api.get("/url/<unique_id>")
def get_url(unique_id):
    link = Link.query.filter_by(short_url=unique_id).first_or_404()
    return jsonify(_url=url_schema.dump(link), short_url=url_for("route.get_url", unique_id=link.short_url))
@api.post("/add_url")
def add_url():
data = request.get_json(force=True)
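    # force=True parses the body as JSON even when the Content-Type header is not application/json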
short_url = None
try:
url = data["url"]
unique_id = get_unique_id()
        add = Link(
            original_url=url,
            short_url=unique_id
        )
db.session.add(add)
db.session.commit()
except KeyError:
abort(400)
else:
return jsonify(msg="sucess add url",unique_id=unique_id)
"""
""" | [
"flask.url_for",
"flask.request.get_json",
"flask.abort",
"flask.Blueprint",
"flask.jsonify"
] | [((171, 197), 'flask.Blueprint', 'Blueprint', (['"""api"""', '__name__'], {}), "('api', __name__)\n", (180, 197), False, 'from flask import Blueprint, request, jsonify, abort, url_for\n'), ((242, 266), 'flask.jsonify', 'jsonify', ([], {'msg': '"""connected"""'}), "(msg='connected')\n", (249, 266), False, 'from flask import Blueprint, request, jsonify, abort, url_for\n'), ((549, 577), 'flask.request.get_json', 'request.get_json', ([], {'force': '(True)'}), '(force=True)\n', (565, 577), False, 'from flask import Blueprint, request, jsonify, abort, url_for\n'), ((879, 929), 'flask.jsonify', 'jsonify', ([], {'msg': '"""sucess add url"""', 'unique_id': 'unique_id'}), "(msg='sucess add url', unique_id=unique_id)\n", (886, 929), False, 'from flask import Blueprint, request, jsonify, abort, url_for\n'), ((449, 499), 'flask.url_for', 'url_for', (['"""route.get_url"""'], {'unique_id': 'link.short_url'}), "('route.get_url', unique_id=link.short_url)\n", (456, 499), False, 'from flask import Blueprint, request, jsonify, abort, url_for\n'), ((843, 853), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (848, 853), False, 'from flask import Blueprint, request, jsonify, abort, url_for\n')] |
from twisted.plugin import IPlugin
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from zope.interface import implements
class SnoOper(ModuleData, Command):
implements(IPlugin, IModuleData, ICommand)
name = "ServerNoticeOper"
def actions(self):
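        # each entry is an (action name, priority, handler) tuple registered with txircd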
return [ ("oper", 1, self.sendOperNotice),
("operfail", 1, self.sendOperFailNotice),
("servernoticetype", 1, self.checkSnoType) ]
def serverCommands(self):
return [ ("OPERFAILNOTICE", 1, self) ]
def sendOperNotice(self, user):
if user.uuid[:3] == self.ircd.serverID:
snodata = {
"mask": "oper",
"message": "{} has opered.".format(user.nick)
}
else:
snodata = {
"mask": "remoteoper",
"message": "{} has opered. (from {})".format(user.nick, self.ircd.servers[user.uuid[:3]].name)
}
self.ircd.runActionProcessing("sendservernotice", snodata)
def sendOperFailNotice(self, user, reason):
snodata = {
"mask": "oper",
"message": "Failed OPER attempt from {} ({})".format(user.nick, reason)
}
self.ircd.runActionProcessing("sendservernotice", snodata)
self.ircd.broadcastToServers(None, "OPERFAILNOTICE", user.uuid, reason, prefix=self.ircd.serverID)
def checkSnoType(self, user, typename):
if typename == "oper":
return True
if typename == "remoteoper":
return True
return False
def parseParams(self, server, params, prefix, tags):
if len(params) != 2:
return None
if prefix not in self.ircd.servers:
return None
if params[0] not in self.ircd.users:
# Since this should always come from the server the user is on, we don't need to worry about recently quit users
return None
return {
"fromserver": self.ircd.servers[prefix],
"user": self.ircd.users[params[0]],
"reason": params[1]
}
def execute(self, server, data):
user = data["user"]
reason = data["reason"]
fromServer = data["fromserver"]
snodata = {
"mask": "remoteoper",
"message": "Failed OPER attempt from {} ({}) (from {})".format(user.nick, reason, fromServer.name)
}
self.ircd.runActionProcessing("sendservernotice", snodata)
self.ircd.broadcastToServers(server, "OPERFAILNOTICE", user.uuid, reason, prefix=fromServer.serverID)
return True
snoOper = SnoOper() | [
"zope.interface.implements"
] | [((190, 232), 'zope.interface.implements', 'implements', (['IPlugin', 'IModuleData', 'ICommand'], {}), '(IPlugin, IModuleData, ICommand)\n', (200, 232), False, 'from zope.interface import implements\n')] |
import os
import re
import string
from graph import Graph, vertex
import random
def get_words_from_text(text_path):
with open(text_path, 'r') as f:
text = f.read()
text = ' '.join(text.split())
    text = text.lower()
text = text.translate(str.maketrans('', '', string.punctuation))
words = text.split()
return words
def make_graph(words):
g = Graph()
previous_word = None
# get the words
for word in words:
word_vertex = g.get_vertex(word)
# if there is a previous word, then add it
if previous_word:
previous_word.increment_edge(word_vertex)
# set the previous word and iterate
previous_word = word_vertex
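    # convert the accumulated edge counts into probability maps for sampling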
g.generate_probability_mappings()
return g
def compose(g, words, length = 50):
composition = []
# to pick a random word to start
word = g.get_vertex(random.choice(words))
for _ in range(length):
composition.append(word.value)
        # get the next word by walking the graph
        word = g.get_next_word(word)
    return composition
def main():
words = get_words_from_text('texts/hp_sorcerer_stone.txt')
g = make_graph(words)
composition = compose(g, words, 100)
return ' '.join(composition)
if __name__ == '__main__':
print(main())
| [
"random.choice",
"graph.Graph"
] | [((395, 402), 'graph.Graph', 'Graph', ([], {}), '()\n', (400, 402), False, 'from graph import Graph, vertex\n'), ((893, 913), 'random.choice', 'random.choice', (['words'], {}), '(words)\n', (906, 913), False, 'import random\n')] |
from di import Container, Dependant, Depends
class Request:
...
class DBConnection:
def __init__(self, request: Request) -> None:
...
def controller(conn: DBConnection = Depends(scope="app")) -> None:
...
def framework() -> None:
container = Container()
with container.enter_scope("app"):
with container.enter_scope("request"):
request = Request()
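            # bind temporarily overrides how Request is resolved while the block is active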
with container.bind(Dependant(lambda: request, scope="request"), Request):
container.execute_sync(container.solve(Dependant(controller)))
| [
"di.Depends",
"di.Dependant",
"di.Container"
] | [((192, 212), 'di.Depends', 'Depends', ([], {'scope': '"""app"""'}), "(scope='app')\n", (199, 212), False, 'from di import Container, Dependant, Depends\n'), ((274, 285), 'di.Container', 'Container', ([], {}), '()\n', (283, 285), False, 'from di import Container, Dependant, Depends\n'), ((436, 480), 'di.Dependant', 'Dependant', (['(lambda : request)'], {'scope': '"""request"""'}), "(lambda : request, scope='request')\n", (445, 480), False, 'from di import Container, Dependant, Depends\n'), ((546, 567), 'di.Dependant', 'Dependant', (['controller'], {}), '(controller)\n', (555, 567), False, 'from di import Container, Dependant, Depends\n')] |
from decimal import Decimal
import time
from typing import Union, Tuple, List
from dimka.bot.base_bot import BaseBot
from dimka.core.app import RestartBotException
import dimka.core.models as models
from dimka.core.utils import td
from dimka.core.dto import Order
# wexapi is used by the trade-history and sell logic below but was missing from the imports
import wexapi.common
import wexapi.trade
import wexapi.models as wex_models
MAX_ORDERS = 3
class Bot(BaseBot):
"""
    This bot trades in three steps.
"""
def run(self):
self.logger.success('************* "{}" bot started: {} *************'.format(
self.params.get('bot_name'),
self.pair,
))
# Bot parameters
base_funds, quote_funds = self.funds()
self.logger.verbose("Available funds")
self.logger.verbose(" Base: ({}): {:f}".format(
self.pair_info.baseAsset,
td(base_funds, self.pair_info.baseAssetPrecision)
))
self.logger.verbose(" Quote ({}): {:f}".format(
self.pair_info.quoteAsset,
td(quote_funds, self.pair_info.quotePrecision)
))
sell_orders = self.active_orders(self.client.SIDE_SELL)
self.logger.success("Active SELL orders: {}".format(len(sell_orders)))
self.show_orders_info(sell_orders)
self.cancel_buy_orders()
last_buy_order = None
# Check quote funds - is it possible to open BUY order?
if quote_funds > self.pair_info.minAmount:
self.logger.success("Check possibility to open BUY order")
# Calculate allowed buy price and create buy order
# Waiting for it execution or restart bot
top_price = self.top_sell_price()
self.logger.success(" Top SELL price: {:f}".format(
td(top_price, self.pair_info.quotePrecision)
))
self.logger.success(" Price increase unit: {:f}".format(self.get_price_unit()))
price = top_price - self.get_price_unit()
self.logger.success(" BUY price: {:f}".format(price))
amount = td(quote_funds / price, self.pair_info.baseAssetPrecision)
self.logger.success(" BUY amount: {:f}".format(amount))
            buy_allowed, message = self.is_buy_allowed(price)
            if not buy_allowed:
                raise RestartBotException(message, timeout=30)
self.logger.success("Start BUY")
trade = self.create_buy_order(price, amount)
time.sleep(1)
buy_state, order_info = self.waiting_order_execution(
trade.orderId,
self.args.iters,
self.args.iters_time,
)
if not buy_state:
self.logger.success(" Cancel order #{}".format(trade.orderId))
self.client.cancel_order(
symbol=self.pair,
orderId=trade.orderId,
)
# SELL
self.logger.success("Starting SELL")
base_funds, quote_funds = self.funds()
self.logger.verbose("Available funds")
self.logger.verbose(" Base: ({}): {:f}".format(
self.pair_info.baseAsset,
td(base_funds, self.pair_info.baseAssetPrecision)
))
self.logger.verbose(" Quote ({}): {:f}".format(
self.pair_info.quoteAsset,
td(quote_funds, self.pair_info.quotePrecision)
))
if base_funds > self.pair_info.minAmount:
# sell_amount = base_funds / Decimal(str(MAX_ORDERS - sell_len))
sell_amount = base_funds
self.logger.success(" Sell amount: {:f}".format(
td(sell_amount, self.pair_info.baseAssetPrecision)
))
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# TODO: CONTINUE FROM HERE
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if not last_buy_order:
last_buy_order = self.get_last_local_buy_order()
# Calculate orders quantity
self.logger.success(" Calculate orders quantity")
orders_count = 1
order_amount = sell_amount
for i in range(MAX_ORDERS, 0, -1):
order_amount = sell_amount / Decimal(str(i))
if order_amount > self.pair_info.min_amount:
orders_count = i
break
self.logger.success(" quantity: {}, order amount: {:f}".format(
orders_count,
td(order_amount, self.units())
))
sell_factor = Decimal(str(self.args.step / 100))
self.logger.debug(" Calculate SELL price")
prev_price = self.find_sell_price(last_buy_order)
for i in range(0, orders_count, 1):
self.logger.debug(" Order #{}".format(i + 1))
step_amount = sell_factor * prev_price
self.logger.debug(" Step amount: {:f}".format(td(step_amount, self.pair_info.decimal_places)))
sell_price = prev_price + step_amount
prev_price = sell_price
self.logger.debug(" SELL price: {:f}".format(td(sell_price, self.pair_info.decimal_places)))
sell_res = self.create_sell_order(sell_price, order_amount)
time.sleep(1)
if not sell_res.order_id:
order = self.get_last_order_from_history('sell')
else:
with wexapi.common.WexConnection() as conn:
t = wexapi.trade.TradeApi(self.key, self.key_handler, conn)
order = t.order_info(sell_res.order_id)
self.save_order(order, last_buy_order)
else:
msg = "{} funds is not enough to open SELL order. Min. amount is: {}".format(
td(base_funds, self.units()),
td(self.pair_info.min_amount, self.units()),
)
raise RestartBotException(msg, timeout=10)
def is_buy_allowed(self, price: Decimal) -> Tuple[bool, str]:
"""
Check is buy allowed
:param: order buy price
:return: state, message
"""
low, high = self.low_high_daily_prices()
high_diff = Decimal(str(abs(self.args.high_diff) / 100))
allowed_price = (high - low) * high_diff
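        # note: the price is compared against a fraction of the daily range; the guard is meant to avoid buying too close to the daily high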
if price > allowed_price:
return False, 'Buy is not allowed, because high price is close'
return True, ''
def show_orders_info(self, orders: List[Order]):
"""
Show orders details
Args:
orders (list): orders list wexapi.models.Order
Returns:
None: Only output info by logger
"""
for order in orders:
self.logger.verbose(" Order #{}".format(order.orderId))
self.logger.verbose(" symbol:{} | side:{} | amount:{:f} | price:{:f} | status:{}".format(
order.symbol,
order.side,
td(order.origQty, self.pair_info.baseAssetPrecision),
td(order.price, self.pair_info.quotePrecision),
order.status,
))
    def waiting_order_execution(self, order_id: int, iter_count: int, iter_time: int) -> Tuple[bool, object]:
"""
Waiting for order execution.
0 - order was completely satisfied with the counter orders
:param order_id:
:param iter_count: iterations count to check is order executed.
:param iter_time: time in seconds for each iteration (time to sleep after iteration).
        :return: (executed, order) tuple - execution state (True - executed) plus the final order info
"""
order = self.client.get_order(symbol=self.pair, orderId=order_id)
while iter_count > 0:
time.sleep(iter_time)
order = self.client.get_order(symbol=self.pair, orderId=order_id)
if order.status in ['FILLED', 'CANCELED', 'PENDING_CANCEL', 'REJECTED', 'EXPIRED']:
# order executed. Exist from while
iter_count = 0
else:
iter_count -= 1
self.logger.debug(" Left iterations: {}".format(iter_count))
        else:
            # the caller unpacks (state, order_info), so return the final order as well
            return order.status in ['FILLED', 'CANCELED', 'PENDING_CANCEL', 'REJECTED', 'EXPIRED'], order
def get_last_order_from_history(self, order_type: str) -> Union[None, wex_models.Order]:
"""
:param order_type: buy OR sell
"""
with wexapi.common.WexConnection() as conn:
t = wexapi.trade.TradeApi(self.key, self.key_handler, conn)
history = t.trade_history(pair=self.pair, count_number=100)
for item in history:
if item.is_your_order and item.type == order_type:
return t.order_info(item.order_id)
# raise Exception(
# "Wex history doesn't contains {} order for pair {}. Are you doing something wrong?".format(
# type,
# self.pair,
# )
# )
return None
def find_sell_price(self, last_buy_order: models.OrderInfo = None):
if not last_buy_order:
return self.top_buy_price()
return last_buy_order.rate
def get_last_local_buy_order(self) -> Union[None, models.OrderInfo]:
last_hist_order = self.get_last_order_from_history('buy')
result = None
if last_hist_order:
try:
result = (models.OrderInfo
.select()
.where(models.OrderInfo.order_type == 'buy', models.OrderInfo.pair == self.pair)
.order_by(models.OrderInfo.created.desc())
.get())
except models.DoesNotExist:
pass
return result
| [
"dimka.core.utils.td",
"dimka.core.app.RestartBotException",
"dimka.core.models.OrderInfo.select",
"time.sleep",
"dimka.core.models.OrderInfo.created.desc"
] | [((1968, 2026), 'dimka.core.utils.td', 'td', (['(quote_funds / price)', 'self.pair_info.baseAssetPrecision'], {}), '(quote_funds / price, self.pair_info.baseAssetPrecision)\n', (1970, 2026), False, 'from dimka.core.utils import td\n'), ((2384, 2397), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2394, 2397), False, 'import time\n'), ((5858, 5894), 'dimka.core.app.RestartBotException', 'RestartBotException', (['msg'], {'timeout': '(10)'}), '(msg, timeout=10)\n', (5877, 5894), False, 'from dimka.core.app import RestartBotException\n'), ((7674, 7695), 'time.sleep', 'time.sleep', (['iter_time'], {}), '(iter_time)\n', (7684, 7695), False, 'import time\n'), ((763, 812), 'dimka.core.utils.td', 'td', (['base_funds', 'self.pair_info.baseAssetPrecision'], {}), '(base_funds, self.pair_info.baseAssetPrecision)\n', (765, 812), False, 'from dimka.core.utils import td\n'), ((932, 978), 'dimka.core.utils.td', 'td', (['quote_funds', 'self.pair_info.quotePrecision'], {}), '(quote_funds, self.pair_info.quotePrecision)\n', (934, 978), False, 'from dimka.core.utils import td\n'), ((2228, 2268), 'dimka.core.app.RestartBotException', 'RestartBotException', (['message'], {'timeout': '(30)'}), '(message, timeout=30)\n', (2247, 2268), False, 'from dimka.core.app import RestartBotException\n'), ((3095, 3144), 'dimka.core.utils.td', 'td', (['base_funds', 'self.pair_info.baseAssetPrecision'], {}), '(base_funds, self.pair_info.baseAssetPrecision)\n', (3097, 3144), False, 'from dimka.core.utils import td\n'), ((3264, 3310), 'dimka.core.utils.td', 'td', (['quote_funds', 'self.pair_info.quotePrecision'], {}), '(quote_funds, self.pair_info.quotePrecision)\n', (3266, 3310), False, 'from dimka.core.utils import td\n'), ((5199, 5212), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5209, 5212), False, 'import time\n'), ((1673, 1717), 'dimka.core.utils.td', 'td', (['top_price', 'self.pair_info.quotePrecision'], {}), '(top_price, self.pair_info.quotePrecision)\n', (1675, 1717), False, 'from dimka.core.utils import td\n'), ((3565, 3615), 'dimka.core.utils.td', 'td', (['sell_amount', 'self.pair_info.baseAssetPrecision'], {}), '(sell_amount, self.pair_info.baseAssetPrecision)\n', (3567, 3615), False, 'from dimka.core.utils import td\n'), ((6899, 6951), 'dimka.core.utils.td', 'td', (['order.origQty', 'self.pair_info.baseAssetPrecision'], {}), '(order.origQty, self.pair_info.baseAssetPrecision)\n', (6901, 6951), False, 'from dimka.core.utils import td\n'), ((6969, 7015), 'dimka.core.utils.td', 'td', (['order.price', 'self.pair_info.quotePrecision'], {}), '(order.price, self.pair_info.quotePrecision)\n', (6971, 7015), False, 'from dimka.core.utils import td\n'), ((4851, 4897), 'dimka.core.utils.td', 'td', (['step_amount', 'self.pair_info.decimal_places'], {}), '(step_amount, self.pair_info.decimal_places)\n', (4853, 4897), False, 'from dimka.core.utils import td\n'), ((5058, 5103), 'dimka.core.utils.td', 'td', (['sell_price', 'self.pair_info.decimal_places'], {}), '(sell_price, self.pair_info.decimal_places)\n', (5060, 5103), False, 'from dimka.core.utils import td\n'), ((9573, 9604), 'dimka.core.models.OrderInfo.created.desc', 'models.OrderInfo.created.desc', ([], {}), '()\n', (9602, 9604), True, 'import dimka.core.models as models\n'), ((9377, 9402), 'dimka.core.models.OrderInfo.select', 'models.OrderInfo.select', ([], {}), '()\n', (9400, 9402), True, 'import dimka.core.models as models\n')] |
from glob import glob
import tarfile, shutil
def main(args, settings):
backup_dir = settings.get('BACKUP_DIR')
timestamp = settings.get('TIMESTAMP')
archive_file = '{0}/{1}.tar.gz'.format(backup_dir, timestamp)
backup_files = list()
for folder_name in ['folders', 'datasources', 'dashboards', 'alert_channels']:
backup_path = '{0}/{1}/{2}'.format(backup_dir, folder_name, timestamp)
for file_path in glob(backup_path):
backup_files.append(file_path)
with tarfile.open(archive_file, "x:gz") as tar:
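        # mode "x:gz" creates a new gzip-compressed archive and fails if the file already exists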
for file_path in backup_files:
tar.add(file_path)
shutil.rmtree(file_path)
tar.close()
print('created archive: {0}'.format(archive_file))
| [
"tarfile.open",
"glob.glob",
"shutil.rmtree"
] | [((441, 458), 'glob.glob', 'glob', (['backup_path'], {}), '(backup_path)\n', (445, 458), False, 'from glob import glob\n'), ((513, 547), 'tarfile.open', 'tarfile.open', (['archive_file', '"""x:gz"""'], {}), "(archive_file, 'x:gz')\n", (525, 547), False, 'import tarfile, shutil\n'), ((638, 662), 'shutil.rmtree', 'shutil.rmtree', (['file_path'], {}), '(file_path)\n', (651, 662), False, 'import tarfile, shutil\n')] |
import random
import time
from pydent.utils.async_requests import make_async
def test_async_basic():
"""Expect array to be return in correct order."""
@make_async(3)
def myfxn(arr):
t = round(random.random(), 2)
time.sleep(t)
return arr
result = myfxn(range(100))
assert result == list(range(100))
def test_async_with_args():
@make_async(2)
def myfxn(arr, arg0):
time.sleep(0.5)
return [arg0] * len(arr)
result = myfxn(range(10), 5)
assert result == [5] * 10
def test_async_with_kwargs():
@make_async(2)
def myfxn(arr, arg0, kwarg0=1):
time.sleep(0.5)
return [arg0 * kwarg0] * len(arr)
result = myfxn(range(10), 5)
result2 = myfxn(range(10), 5, kwarg0=2)
assert result == [5] * 10
assert result2 == [10] * 10
def test_async_class_method():
class Foo:
@make_async(2, as_classmethod=True)
def myfxn(self, arr, arg0, kwarg0=1):
time.sleep(0.5)
return [arg0 * kwarg0] * len(arr)
foo = Foo()
result = foo.myfxn(range(10), 5)
result2 = foo.myfxn(range(10), 5, kwarg0=2)
assert result == [5] * 10
assert result2 == [10] * 10
def test_async_data_pos():
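    # data_pos=2 points make_async at the positional argument holding the iterable to split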
@make_async(2, data_pos=2)
def myfxn(arg0, arg1, arr, arg2, kwarg0=1):
time.sleep(0.5)
return [arg0 * kwarg0 * arg1 * arg2] * len(arr)
result = myfxn(1, 2, range(10), 3)
result2 = myfxn(1, 2, range(10), 3, kwarg0=2)
assert result == [6] * 10
assert result2 == [12] * 10
| [
"random.random",
"time.sleep",
"pydent.utils.async_requests.make_async"
] | [((164, 177), 'pydent.utils.async_requests.make_async', 'make_async', (['(3)'], {}), '(3)\n', (174, 177), False, 'from pydent.utils.async_requests import make_async\n'), ((383, 396), 'pydent.utils.async_requests.make_async', 'make_async', (['(2)'], {}), '(2)\n', (393, 396), False, 'from pydent.utils.async_requests import make_async\n'), ((582, 595), 'pydent.utils.async_requests.make_async', 'make_async', (['(2)'], {}), '(2)\n', (592, 595), False, 'from pydent.utils.async_requests import make_async\n'), ((1250, 1275), 'pydent.utils.async_requests.make_async', 'make_async', (['(2)'], {'data_pos': '(2)'}), '(2, data_pos=2)\n', (1260, 1275), False, 'from pydent.utils.async_requests import make_async\n'), ((244, 257), 'time.sleep', 'time.sleep', (['t'], {}), '(t)\n', (254, 257), False, 'import time\n'), ((431, 446), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (441, 446), False, 'import time\n'), ((640, 655), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (650, 655), False, 'import time\n'), ((896, 930), 'pydent.utils.async_requests.make_async', 'make_async', (['(2)'], {'as_classmethod': '(True)'}), '(2, as_classmethod=True)\n', (906, 930), False, 'from pydent.utils.async_requests import make_async\n'), ((1332, 1347), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1342, 1347), False, 'import time\n'), ((216, 231), 'random.random', 'random.random', ([], {}), '()\n', (229, 231), False, 'import random\n'), ((989, 1004), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (999, 1004), False, 'import time\n')] |
#!/usr/bin/env python3
# Import the system module
from sys import exit as greska
# Import the math modules
import numpy as np
import numpy.linalg as LA
# Normalize the points
def normalizuj(tacke):
    # Convert the points to affine coordinates
tacke = np.array([*map(lambda x:
(x[0]/x[2], x[1]/x[2], 1),
tacke)])
    # Centroid of the points
    tez = np.mean(tacke, axis = 0)
    # Translation matrix
g = np.array([[ 1, 0, -tez[0]],
[ 0, 1, -tez[1]],
[ 0, 0, 1 ]])
    # Translate all the points
    tacke = np.array([*map(lambda x: g@x, tacke)])
    # Mean distance from the origin
rast = np.mean(np.array([*map(lambda x:
np.sqrt(x[0]**2 + x[1]**2),
tacke)]))
    # Scaling (homothety) matrix
s = np.array([[np.sqrt(2)/rast, 0, 0 ],
[ 0, np.sqrt(2)/rast, 0 ],
[ 0, 0, 1 ]])
    # Scale all the points;
    # also return the transformation
return s@g, np.array([*map(lambda x: s@x, tacke)])
# Function for 3D reconstruction
def rekonstruisi():
#####################################################
    ##### 3D RECONSTRUCTION FROM PLANAR PROJECTIONS #####
#####################################################
    # Pixel coordinates of the visible points (eight),
    # from which the fundamental matrix is determined
x1 = np.array([331, 76, 1.])
y1 = np.array([393, 75, 1.])
x2 = np.array([496, 55, 1.])
y2 = np.array([561, 76, 1.])
x3 = np.array([717, 167, 1.])
y3 = np.array([565, 198, 1.])
x4 = np.array([539, 188, 1.])
y4 = np.array([373, 196, 1.])
x19 = np.array([923, 600, 1.])
y19 = np.array([860, 655, 1.])
x20 = np.array([701, 780, 1.])
y20 = np.array([457, 778, 1.])
x23 = np.array([920, 786, 1.])
y23 = np.array([856, 839, 1.])
x24 = np.array([696, 988, 1.])
y24 = np.array([462, 977, 1.])
    # Vectors of those points
xx = np.array([x1, x2, x3, x4, x19, x20, x23, x24])
yy = np.array([y1, y2, y3, y4, y19, y20, y23, y24])
    # Normalize the points
tx, xxx = normalizuj(xx)
ty, yyy = normalizuj(yy)
    # Equation y^T * F * x = 0, where the unknowns are
    # the coefficients of the sought fundamental matrix
jed = lambda x, y: np.array([np.outer(y, x).flatten()])
    # 8x9 matrix representing the eight
    # equations obtained from the correspondences
jed8 = np.concatenate([jed(x, y) for x, y in zip(xxx, yyy)])
    # DLT algorithm, SVD decomposition
SVDJed8 = LA.svd(jed8)
    # Vector of the fundamental matrix coefficients,
    # obtained as the last row of the matrix V^T
Fvector = SVDJed8[-1][-1]
    # Fundamental matrix assembled from that vector
FFt = Fvector.reshape(3, 3)
#####################################################
    ##### ENFORCING DET(FF)=0, THE SINGULARITY CONSTRAINT #####
#####################################################
    # SVD decomposition of the fundamental matrix
Ut, DDt, VTt = LA.svd(FFt)
    # The desired matrix is singular
DD1t = np.diag([1, 1, 0]) @ DDt
DD1t = np.diag(DD1t)
    # Replace the matrix DD with the new DD1, thereby
    # obtaining a new fundamental matrix
FF1t = Ut @ DD1t @ VTt
    # Return to the original coordinate system
FF = ty.T @ FF1t @ tx
#####################################################
    ##### DETERMINING THE EPIPOLES ######################
#####################################################
    # SVD decomposition of the fundamental matrix
U, _, VT = LA.svd(FF)
    # The third row of V^T is the first epipole, the solution of
    # F * e1 = 0, corresponding to the smallest singular value
e1 = VT[-1]
    # Affine coordinates of the first epipole
norm = lambda tacka: tacka/tacka[-1]
e1 = norm(e1)
    # For the second epipole we do not solve F^T * e2 = 0;
    # instead we note that the SVD of F^T is the
    # transpose of that of F, so the second epipole is
    # the last column of U from the first decomposition
e2 = U[:, -1]
    # Affine coordinates of the second epipole
e2 = norm(e2)
###################################################
    ##### RECONSTRUCTING THE HIDDEN POINTS ############
###################################################
    # Remaining visible points
x5 = np.array([327, 295, 1.])
x7 = np.array([713, 401, 1.])
y7 = np.array([565, 423, 1.])
x8 = np.array([539, 431, 1.])
y8 = np.array([377, 422, 1.])
x9 = np.array([261, 340, 1.])
y9 = np.array([282, 311, 1.])
y10 = np.array([712, 332, 1.])
x11 = np.array([775, 367, 1.])
y11 = np.array([685, 403, 1.])
x12 = np.array([310, 416, 1.])
y12 = np.array([237, 379, 1.])
x13 = np.array([268, 590, 1.])
y14 = np.array([713, 566, 1.])
x15 = np.array([766, 619, 1.])
y15 = np.array([684, 644, 1.])
x16 = np.array([315, 670, 1.])
y16 = np.array([247, 616, 1.])
x17 = np.array([ 91, 628, 1.])
y17 = np.array([125, 552, 1.])
x21 = np.array([ 94, 824, 1.])
y21 = np.array([131, 720, 1.])
    # The coordinates of the invisible points must be found
krstx = lambda a, b, c, d, e, f, g, h, i, j: np.cross(
np.cross(np.cross(np.cross(a, b), np.cross(c, d)), e),
np.cross(np.cross(np.cross(f, g), np.cross(h, i)), j))
krst = lambda *args: np.round(norm(krstx(*args)))
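    # each hidden vertex is recovered as the intersection of two lines drawn
    # through known vertices and the vanishing points of parallel edges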
    # Invisible points of the first projection
x6 = krst( x5, x1, x8, x4, x2,
x8, x5, x3, x2, x7)
x10 = krst(x16, x13, x12, x9, x11,
x12, x11, x16, x15, x9)
x14 = krst(x16, x15, x12, x11, x13,
x16, x13, x12, x9, x15)
x18 = krst(x20, x19, x24, x23, x17,
x24, x21, x20, x17, x19)
x22 = krst(x20, x19, x24, x23, x21,
x24, x21, x20, x17, x23)
    # Invisible points of the second projection
y5 = krst( y8, y4, y7, y3, y1,
y4, y1, y3, y2, y8)
y6 = krst( y5, y1, y8, y4, y2,
y8, y5, y3, y2, y7)
y13 = krst(y15, y16, y10, y9, y14,
y16, y12, y15, y11, y9)
y18 = krst(y20, y19, y24, y23, y17,
y24, y21, y20, y17, y19)
y22 = krst(y20, y19, y24, y23, y21,
y24, y21, y20, y17, y23)
###################################################
    ##### TRIANGULATION ###############################
###################################################
    # Canonical camera matrix
T1 = np.hstack([np.eye(3), np.zeros(3).reshape(3, 1)])
    # Cross-product (skew-symmetric) matrix
vec = lambda p: np.array([[ 0, -p[2], p[1]],
[ p[2], 0, -p[0]],
[-p[1], p[0], 0 ]])
    # Matrix of the second epipole
E2 = vec(e2)
    # Second camera matrix
T2 = np.hstack([E2 @ FF, e2.reshape(3, 1)])
    # For each point, a system of four equations in
    # four homogeneous unknowns (three would also suffice)
jednacine = lambda xx, yy: np.array([ xx[1]*T1[2] - xx[2]*T1[1],
-xx[0]*T1[2] + xx[2]*T1[0],
yy[1]*T2[2] - yy[2]*T2[1],
-yy[0]*T2[2] + yy[2]*T2[0]])
    # Affine 3D coordinates
UAfine = lambda xx: (xx/xx[-1])[:-1]
    # Function returning the 3D coordinates of a reconstructed point
TriD = lambda xx, yy: UAfine(LA.svd(jednacine(xx, yy))[-1][-1])
    # Pixel coordinates from both images
slika1 = np.array([x1, x2, x3, x4, x5, x6, x7, x8, x9,
x10, x11, x12, x13, x14, x15, x16,
x17, x18, x19, x20, x21, x22, x23, x24])
slika2 = np.array([y1, y2, y3, y4, y5, y6, y7, y8, y9,
y10, y11, y12, y13, y14, y15, y16,
y17, y18, y19, y20, y21, y22, y23, y24])
    # Reconstructed 3D coordinates of the points
rekonstruisane = np.array([TriD(x, y) for x, y
in zip(slika1, slika2)])
    # Scale the z-coordinate to improve the result
rekonstruisane380 = np.array([*map(lambda x:
np.diag([1, 1, 380]) @ x,
rekonstruisane)])
    # Edges of the reconstructed objects
iviceMala = np.array([[1, 2], [2, 3], [3, 4], [4, 1],
[5, 6], [6, 7], [7, 8], [8, 5],
[1, 5], [2, 6], [3, 7], [4, 8]])
iviceSrednja = np.array([[ 9, 10], [10, 11], [11, 12], [12, 9],
[13, 14], [14, 15], [15, 16], [16, 13],
[ 9, 13], [10, 14], [11, 15], [12, 16]])
iviceVelika = np.array([[17, 18], [18, 19], [19, 20], [20, 17],
[21, 22], [22, 23], [23, 24], [24, 21],
[17, 21], [18, 22], [19, 23], [20, 24]])
    # Return the results
return rekonstruisane380, iviceMala, iviceSrednja, iviceVelika
# Inspect the global variable holding the program
# name to tell whether it was launched directly,
# rather than, e.g., merely imported or passed as
# input to a program that interprets the code
if __name__ == '__main__':
greska('Tridrek nije samostalan program! Pokrenite main!')
| [
"numpy.mean",
"numpy.eye",
"numpy.sqrt",
"numpy.cross",
"numpy.diag",
"numpy.array",
"numpy.zeros",
"numpy.outer",
"sys.exit",
"numpy.linalg.svd"
] | [((406, 428), 'numpy.mean', 'np.mean', (['tacke'], {'axis': '(0)'}), '(tacke, axis=0)\n', (413, 428), True, 'import numpy as np\n'), ((465, 520), 'numpy.array', 'np.array', (['[[1, 0, -tez[0]], [0, 1, -tez[1]], [0, 0, 1]]'], {}), '([[1, 0, -tez[0]], [0, 1, -tez[1]], [0, 0, 1]])\n', (473, 520), True, 'import numpy as np\n'), ((1503, 1527), 'numpy.array', 'np.array', (['[331, 76, 1.0]'], {}), '([331, 76, 1.0])\n', (1511, 1527), True, 'import numpy as np\n'), ((1537, 1561), 'numpy.array', 'np.array', (['[393, 75, 1.0]'], {}), '([393, 75, 1.0])\n', (1545, 1561), True, 'import numpy as np\n'), ((1571, 1595), 'numpy.array', 'np.array', (['[496, 55, 1.0]'], {}), '([496, 55, 1.0])\n', (1579, 1595), True, 'import numpy as np\n'), ((1605, 1629), 'numpy.array', 'np.array', (['[561, 76, 1.0]'], {}), '([561, 76, 1.0])\n', (1613, 1629), True, 'import numpy as np\n'), ((1639, 1664), 'numpy.array', 'np.array', (['[717, 167, 1.0]'], {}), '([717, 167, 1.0])\n', (1647, 1664), True, 'import numpy as np\n'), ((1673, 1698), 'numpy.array', 'np.array', (['[565, 198, 1.0]'], {}), '([565, 198, 1.0])\n', (1681, 1698), True, 'import numpy as np\n'), ((1707, 1732), 'numpy.array', 'np.array', (['[539, 188, 1.0]'], {}), '([539, 188, 1.0])\n', (1715, 1732), True, 'import numpy as np\n'), ((1741, 1766), 'numpy.array', 'np.array', (['[373, 196, 1.0]'], {}), '([373, 196, 1.0])\n', (1749, 1766), True, 'import numpy as np\n'), ((1775, 1800), 'numpy.array', 'np.array', (['[923, 600, 1.0]'], {}), '([923, 600, 1.0])\n', (1783, 1800), True, 'import numpy as np\n'), ((1809, 1834), 'numpy.array', 'np.array', (['[860, 655, 1.0]'], {}), '([860, 655, 1.0])\n', (1817, 1834), True, 'import numpy as np\n'), ((1843, 1868), 'numpy.array', 'np.array', (['[701, 780, 1.0]'], {}), '([701, 780, 1.0])\n', (1851, 1868), True, 'import numpy as np\n'), ((1877, 1902), 'numpy.array', 'np.array', (['[457, 778, 1.0]'], {}), '([457, 778, 1.0])\n', (1885, 1902), True, 'import numpy as np\n'), ((1911, 1936), 'numpy.array', 'np.array', (['[920, 786, 1.0]'], {}), '([920, 786, 1.0])\n', (1919, 1936), True, 'import numpy as np\n'), ((1945, 1970), 'numpy.array', 'np.array', (['[856, 839, 1.0]'], {}), '([856, 839, 1.0])\n', (1953, 1970), True, 'import numpy as np\n'), ((1979, 2004), 'numpy.array', 'np.array', (['[696, 988, 1.0]'], {}), '([696, 988, 1.0])\n', (1987, 2004), True, 'import numpy as np\n'), ((2013, 2038), 'numpy.array', 'np.array', (['[462, 977, 1.0]'], {}), '([462, 977, 1.0])\n', (2021, 2038), True, 'import numpy as np\n'), ((2072, 2118), 'numpy.array', 'np.array', (['[x1, x2, x3, x4, x19, x20, x23, x24]'], {}), '([x1, x2, x3, x4, x19, x20, x23, x24])\n', (2080, 2118), True, 'import numpy as np\n'), ((2127, 2173), 'numpy.array', 'np.array', (['[y1, y2, y3, y4, y19, y20, y23, y24]'], {}), '([y1, y2, y3, y4, y19, y20, y23, y24])\n', (2135, 2173), True, 'import numpy as np\n'), ((2626, 2638), 'numpy.linalg.svd', 'LA.svd', (['jed8'], {}), '(jed8)\n', (2632, 2638), True, 'import numpy.linalg as LA\n'), ((3085, 3096), 'numpy.linalg.svd', 'LA.svd', (['FFt'], {}), '(FFt)\n', (3091, 3096), True, 'import numpy.linalg as LA\n'), ((3179, 3192), 'numpy.diag', 'np.diag', (['DD1t'], {}), '(DD1t)\n', (3186, 3192), True, 'import numpy as np\n'), ((3607, 3617), 'numpy.linalg.svd', 'LA.svd', (['FF'], {}), '(FF)\n', (3613, 3617), True, 'import numpy.linalg as LA\n'), ((4316, 4341), 'numpy.array', 'np.array', (['[327, 295, 1.0]'], {}), '([327, 295, 1.0])\n', (4324, 4341), True, 'import numpy as np\n'), ((4350, 4375), 'numpy.array', 'np.array', (['[713, 401, 1.0]'], 
{}), '([713, 401, 1.0])\n', (4358, 4375), True, 'import numpy as np\n'), ((4384, 4409), 'numpy.array', 'np.array', (['[565, 423, 1.0]'], {}), '([565, 423, 1.0])\n', (4392, 4409), True, 'import numpy as np\n'), ((4418, 4443), 'numpy.array', 'np.array', (['[539, 431, 1.0]'], {}), '([539, 431, 1.0])\n', (4426, 4443), True, 'import numpy as np\n'), ((4452, 4477), 'numpy.array', 'np.array', (['[377, 422, 1.0]'], {}), '([377, 422, 1.0])\n', (4460, 4477), True, 'import numpy as np\n'), ((4486, 4511), 'numpy.array', 'np.array', (['[261, 340, 1.0]'], {}), '([261, 340, 1.0])\n', (4494, 4511), True, 'import numpy as np\n'), ((4520, 4545), 'numpy.array', 'np.array', (['[282, 311, 1.0]'], {}), '([282, 311, 1.0])\n', (4528, 4545), True, 'import numpy as np\n'), ((4554, 4579), 'numpy.array', 'np.array', (['[712, 332, 1.0]'], {}), '([712, 332, 1.0])\n', (4562, 4579), True, 'import numpy as np\n'), ((4588, 4613), 'numpy.array', 'np.array', (['[775, 367, 1.0]'], {}), '([775, 367, 1.0])\n', (4596, 4613), True, 'import numpy as np\n'), ((4622, 4647), 'numpy.array', 'np.array', (['[685, 403, 1.0]'], {}), '([685, 403, 1.0])\n', (4630, 4647), True, 'import numpy as np\n'), ((4656, 4681), 'numpy.array', 'np.array', (['[310, 416, 1.0]'], {}), '([310, 416, 1.0])\n', (4664, 4681), True, 'import numpy as np\n'), ((4690, 4715), 'numpy.array', 'np.array', (['[237, 379, 1.0]'], {}), '([237, 379, 1.0])\n', (4698, 4715), True, 'import numpy as np\n'), ((4724, 4749), 'numpy.array', 'np.array', (['[268, 590, 1.0]'], {}), '([268, 590, 1.0])\n', (4732, 4749), True, 'import numpy as np\n'), ((4758, 4783), 'numpy.array', 'np.array', (['[713, 566, 1.0]'], {}), '([713, 566, 1.0])\n', (4766, 4783), True, 'import numpy as np\n'), ((4792, 4817), 'numpy.array', 'np.array', (['[766, 619, 1.0]'], {}), '([766, 619, 1.0])\n', (4800, 4817), True, 'import numpy as np\n'), ((4826, 4851), 'numpy.array', 'np.array', (['[684, 644, 1.0]'], {}), '([684, 644, 1.0])\n', (4834, 4851), True, 'import numpy as np\n'), ((4860, 4885), 'numpy.array', 'np.array', (['[315, 670, 1.0]'], {}), '([315, 670, 1.0])\n', (4868, 4885), True, 'import numpy as np\n'), ((4894, 4919), 'numpy.array', 'np.array', (['[247, 616, 1.0]'], {}), '([247, 616, 1.0])\n', (4902, 4919), True, 'import numpy as np\n'), ((4928, 4952), 'numpy.array', 'np.array', (['[91, 628, 1.0]'], {}), '([91, 628, 1.0])\n', (4936, 4952), True, 'import numpy as np\n'), ((4962, 4987), 'numpy.array', 'np.array', (['[125, 552, 1.0]'], {}), '([125, 552, 1.0])\n', (4970, 4987), True, 'import numpy as np\n'), ((4996, 5020), 'numpy.array', 'np.array', (['[94, 824, 1.0]'], {}), '([94, 824, 1.0])\n', (5004, 5020), True, 'import numpy as np\n'), ((5030, 5055), 'numpy.array', 'np.array', (['[131, 720, 1.0]'], {}), '([131, 720, 1.0])\n', (5038, 5055), True, 'import numpy as np\n'), ((7402, 7527), 'numpy.array', 'np.array', (['[x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17,\n x18, x19, x20, x21, x22, x23, x24]'], {}), '([x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15,\n x16, x17, x18, x19, x20, x21, x22, x23, x24])\n', (7410, 7527), True, 'import numpy as np\n'), ((7582, 7707), 'numpy.array', 'np.array', (['[y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11, y12, y13, y14, y15, y16, y17,\n y18, y19, y20, y21, y22, y23, y24]'], {}), '([y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11, y12, y13, y14, y15,\n y16, y17, y18, y19, y20, y21, y22, y23, y24])\n', (7590, 7707), True, 'import numpy as np\n'), ((8176, 8287), 'numpy.array', 'np.array', (['[[1, 2], [2, 3], [3, 4], [4, 1], 
[5, 6], [6, 7], [7, 8], [8, 5], [1, 5], [2,\n 6], [3, 7], [4, 8]]'], {}), '([[1, 2], [2, 3], [3, 4], [4, 1], [5, 6], [6, 7], [7, 8], [8, 5], [\n 1, 5], [2, 6], [3, 7], [4, 8]])\n', (8184, 8287), True, 'import numpy as np\n'), ((8353, 8485), 'numpy.array', 'np.array', (['[[9, 10], [10, 11], [11, 12], [12, 9], [13, 14], [14, 15], [15, 16], [16, \n 13], [9, 13], [10, 14], [11, 15], [12, 16]]'], {}), '([[9, 10], [10, 11], [11, 12], [12, 9], [13, 14], [14, 15], [15, 16\n ], [16, 13], [9, 13], [10, 14], [11, 15], [12, 16]])\n', (8361, 8485), True, 'import numpy as np\n'), ((8559, 8694), 'numpy.array', 'np.array', (['[[17, 18], [18, 19], [19, 20], [20, 17], [21, 22], [22, 23], [23, 24], [24,\n 21], [17, 21], [18, 22], [19, 23], [20, 24]]'], {}), '([[17, 18], [18, 19], [19, 20], [20, 17], [21, 22], [22, 23], [23, \n 24], [24, 21], [17, 21], [18, 22], [19, 23], [20, 24]])\n', (8567, 8694), True, 'import numpy as np\n'), ((9059, 9117), 'sys.exit', 'greska', (['"""Tridrek nije samostalan program! Pokrenite main!"""'], {}), "('Tridrek nije samostalan program! Pokrenite main!')\n", (9065, 9117), True, 'from sys import exit as greska\n'), ((3144, 3162), 'numpy.diag', 'np.diag', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (3151, 3162), True, 'import numpy as np\n'), ((6526, 6590), 'numpy.array', 'np.array', (['[[0, -p[2], p[1]], [p[2], 0, -p[0]], [-p[1], p[0], 0]]'], {}), '([[0, -p[2], p[1]], [p[2], 0, -p[0]], [-p[1], p[0], 0]])\n', (6534, 6590), True, 'import numpy as np\n'), ((6918, 7059), 'numpy.array', 'np.array', (['[xx[1] * T1[2] - xx[2] * T1[1], -xx[0] * T1[2] + xx[2] * T1[0], yy[1] * T2[\n 2] - yy[2] * T2[1], -yy[0] * T2[2] + yy[2] * T2[0]]'], {}), '([xx[1] * T1[2] - xx[2] * T1[1], -xx[0] * T1[2] + xx[2] * T1[0], yy\n [1] * T2[2] - yy[2] * T2[1], -yy[0] * T2[2] + yy[2] * T2[0]])\n', (6926, 7059), True, 'import numpy as np\n'), ((6433, 6442), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (6439, 6442), True, 'import numpy as np\n'), ((870, 880), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (877, 880), True, 'import numpy as np\n'), ((953, 963), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (960, 963), True, 'import numpy as np\n'), ((5197, 5211), 'numpy.cross', 'np.cross', (['a', 'b'], {}), '(a, b)\n', (5205, 5211), True, 'import numpy as np\n'), ((5213, 5227), 'numpy.cross', 'np.cross', (['c', 'd'], {}), '(c, d)\n', (5221, 5227), True, 'import numpy as np\n'), ((5263, 5277), 'numpy.cross', 'np.cross', (['f', 'g'], {}), '(f, g)\n', (5271, 5277), True, 'import numpy as np\n'), ((5279, 5293), 'numpy.cross', 'np.cross', (['h', 'i'], {}), '(h, i)\n', (5287, 5293), True, 'import numpy as np\n'), ((6444, 6455), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (6452, 6455), True, 'import numpy as np\n'), ((2389, 2403), 'numpy.outer', 'np.outer', (['y', 'x'], {}), '(y, x)\n', (2397, 2403), True, 'import numpy as np\n'), ((755, 785), 'numpy.sqrt', 'np.sqrt', (['(x[0] ** 2 + x[1] ** 2)'], {}), '(x[0] ** 2 + x[1] ** 2)\n', (762, 785), True, 'import numpy as np\n'), ((8041, 8061), 'numpy.diag', 'np.diag', (['[1, 1, 380]'], {}), '([1, 1, 380])\n', (8048, 8061), True, 'import numpy as np\n')] |
import requests
def get_proxy():
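    # fetch the whole pool from a locally running proxy_pool service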
return requests.get("http://127.0.0.1:5010/get_all/").json()
proxy = get_proxy()
for i in proxy:
print({"http": "http://{}".format(i.get("proxy"))})
| [
"requests.get"
] | [((47, 93), 'requests.get', 'requests.get', (['"""http://127.0.0.1:5010/get_all/"""'], {}), "('http://127.0.0.1:5010/get_all/')\n", (59, 93), False, 'import requests\n')] |